| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of two subsets of `arr`."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums exactly to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip element i ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the largest achievable subset sum j <= s/2 minimizes the difference s - 2j
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
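
# Quick sanity checks (hypothetical values, not part of the original file):
# find_min([1, 2, 3, 9]) -> 3   best split is {1, 2, 3} (sum 6) vs {9}
# find_min([1, 6, 11, 5]) -> 1  best split is {1, 5, 6} (sum 12) vs {11}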
---
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
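
# A model-specific test file would mix this in roughly as follows (hypothetical sketch;
# the class names below are illustrative and not taken from this file):
#
# class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#     all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else ()
#
#     def setUp(self):
#         self.model_tester = FlaxGPT2ModelTester(self)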
---
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate each process's waiting time under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many processes you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
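
# Worked example (hypothetical input, not part of the original file): with
# arrival_time = [0, 1, 2] and burst_time = [3, 1, 2], SRTF preempts P1 at t=1 to run
# P2 (shortest remaining), resumes and finishes P1, then runs P3, giving
# calculate_waitingtime(...) == [1, 0, 2].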
---
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP applies no input scaling; kept for API compatibility with other schedulers.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
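
# Minimal denoising-loop sketch (hypothetical usage, not part of the original file;
# `unet` stands in for a real model, and production pipelines also pass prev_timestep
# explicitly when the timestep schedule is subsampled):
#
# scheduler = UnCLIPScheduler()
# sample = torch.randn(1, 3, 64, 64)
# for t in scheduler.timesteps:
#     model_output = unet(sample, t)
#     sample = scheduler.step(model_output, t, sample).prev_sample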
---
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
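
# Typical round-trip (hypothetical checkpoint name and image, not from this file):
#
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")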
---
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logger adapter that, by default, only logs on the main process."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
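
# Usage sketch (assumed, not part of the original file): after `Accelerator()` or
# `PartialState()` has been constructed, every rank can share one logger:
#
# logger = get_logger(__name__)
# logger.info("only printed on the main process")
# logger.info("printed on every rank", main_process_only=False)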
---
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a__ ( a_ ):
def __init__( self , _a , _a ):
lowercase : List[str] = params
lowercase : List[str] = np.array(_a )
lowercase : str = np.array([len(_a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _a ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def __magic_name__ ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __magic_name__ ( self ):
lowercase : Tuple = self.params.max_model_input_size
lowercase : Dict = self.lengths > max_len
logger.info(f"""Splitting {sum(_a )} too long sequences.""" )
def divide_chunks(_a , _a ):
return [l[i : i + n] for i in range(0 , len(_a ) , _a )]
lowercase : List[str] = []
lowercase : str = []
if self.params.mlm:
lowercase , lowercase : Dict = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
lowercase , lowercase : List[str] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowercase : List[str] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowercase : Dict = np.insert(_a , 0 , _a )
if sub_s[-1] != sep_id:
lowercase : Tuple = np.insert(_a , len(_a ) , _a )
assert len(_a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_a )
new_tok_ids.extend(_a )
new_lengths.extend([len(_a ) for l in sub_seqs] )
lowercase : Optional[int] = np.array(_a )
lowercase : Any = np.array(_a )
def __magic_name__ ( self ):
lowercase : Dict = len(self )
lowercase : Optional[Any] = self.lengths > 11
lowercase : Optional[Any] = self.token_ids[indices]
lowercase : Union[str, Any] = self.lengths[indices]
lowercase : Union[str, Any] = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __magic_name__ ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowercase : Dict = self.params.special_tok_ids["unk_token"]
lowercase : Optional[Any] = len(self )
lowercase : Optional[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowercase : List[Any] = (unk_occs / self.lengths) < 0.5
lowercase : Union[str, Any] = self.token_ids[indices]
lowercase : str = self.lengths[indices]
lowercase : List[str] = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __magic_name__ ( self ):
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __magic_name__ ( self , _a ):
lowercase : Optional[Any] = [t[0] for t in batch]
lowercase : Union[str, Any] = [t[1] for t in batch]
assert len(_a ) == len(_a )
# Max for paddings
lowercase : List[str] = max(_a )
# Pad token ids
if self.params.mlm:
lowercase : Dict = self.params.special_tok_ids["pad_token"]
else:
lowercase : List[str] = self.params.special_tok_ids["unk_token"]
lowercase : Union[str, Any] = [list(t.astype(_a ) ) + [pad_idx] * (max_seq_len_ - len(_a )) for t in token_ids]
assert len(tk_ ) == len(_a )
assert all(len(_a ) == max_seq_len_ for t in tk_ )
lowercase : Tuple = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowercase : Union[str, Any] = torch.tensor(_a ) # (bs)
return tk_t, lg_t
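
# The collate hook above is meant to be handed to a DataLoader (hypothetical sketch):
#
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)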
---
"""simple docstring"""
from math import factorial
_A : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def __magic_name__ ( __snake_case : int ) -> int:
if not isinstance(__snake_case , __snake_case ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__snake_case ) )
def __magic_name__ ( __snake_case : int = 60 , __snake_case : int = 100_0000 ) -> int:
if not isinstance(__snake_case , __snake_case ) or not isinstance(__snake_case , __snake_case ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
lowercase : Union[str, Any] = 0
# the cached sizes of the previous chains
lowercase : dict[int, int] = {}
for start_chain_element in range(1 , __snake_case ):
# The temporary set will contain the elements of the chain
lowercase : str = set()
lowercase : Dict = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowercase : Any = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__snake_case )
chain_set_length += 1
lowercase : str = digit_factorial_sum(__snake_case )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowercase : str = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
---
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
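
# The shim keeps old code working (sketch): instantiating DeiTFeatureExtractor emits the
# FutureWarning above, then behaves exactly like DeiTImageProcessor.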
---
"""simple docstring"""
from math import sqrt
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' must been an int and positive"
lowercase_ : List[Any] = True
# 0 and 1 are none primes.
if number <= 1:
lowercase_ : List[Any] = False
for divisor in range(2 , int(round(sqrt(__SCREAMING_SNAKE_CASE ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase_ : Union[str, Any] = False
break
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'status' must been from type bool"
return status
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase_ : int = list(range(2 , n + 1 ) )
lowercase_ : List[Any] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase_ : List[str] = 0
# filters actual prime numbers.
lowercase_ : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
lowercase_ : Optional[int] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__SCREAMING_SNAKE_CASE ):
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
lowercase_ : Union[str, Any] = [] # this list will be returns of the function.
# potential prime number factors.
lowercase_ : Union[str, Any] = 2
lowercase_ : int = number
if number == 0 or number == 1:
ans.append(__SCREAMING_SNAKE_CASE )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(__SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(__SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ : Union[str, Any] = 0
# prime factorization of 'number'
lowercase_ : Tuple = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = max(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ : str = 0
# prime factorization of 'number'
lowercase_ : str = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = min(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __SCREAMING_SNAKE_CASE ), "compare bust been from type bool"
return number % 2 == 0
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __SCREAMING_SNAKE_CASE ), "compare bust been from type bool"
return number % 2 != 0
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(__SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
lowercase_ : Optional[int] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase_ : Dict = get_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : int = len(__SCREAMING_SNAKE_CASE )
# run variable for while-loops.
lowercase_ : str = 0
lowercase_ : str = None
# exit variable. for break up the loops
lowercase_ : str = True
while i < len_pn and loop:
lowercase_ : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase_ : List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (len(__SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase_ : Tuple = 0
while numbera != 0:
lowercase_ : List[str] = numbera % numbera
lowercase_ : str = numbera
lowercase_ : int = rest
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase_ : List[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase_ : int = prime_factorization(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = prime_factorization(__SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
lowercase_ : str = []
lowercase_ : Optional[int] = []
lowercase_ : int = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : List[Any] = 0
lowercase_ : int = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase_ : Tuple = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
lowercase_ : int = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
lowercase_ : Dict = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase_ : Any = prime_fac_a.count(__SCREAMING_SNAKE_CASE )
for _ in range(__SCREAMING_SNAKE_CASE ):
ans *= n
done.append(__SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
lowercase_ : List[Any] = 0
lowercase_ : Any = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and is_prime(
__SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
assert (
is_prime(__SCREAMING_SNAKE_CASE ) and is_prime(__SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase_ : Union[str, Any] = p_number_a + 1 # jump to the next number
lowercase_ : int = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(__SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(__SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(__SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1"
lowercase_ : str = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__SCREAMING_SNAKE_CASE )
# precondition
assert ans[0] == 1 and ans[len(__SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase_ : Any = get_divisors(__SCREAMING_SNAKE_CASE )
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (divisors[0] == 1)
and (divisors[len(__SCREAMING_SNAKE_CASE ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ):
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase_ : List[Any] = gcd(abs(__SCREAMING_SNAKE_CASE ) , abs(__SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0"
lowercase_ : Union[str, Any] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0"
lowercase_ : List[Any] = 0
lowercase_ : Optional[int] = 1
lowercase_ : int = 1 # this will be return
for _ in range(n - 1 ):
lowercase_ : Optional[Any] = ans
ans += fiba
lowercase_ : List[Any] = tmp
return ans
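
# Spot checks (hypothetical values, not part of the original file):
# gcd(12, 18) -> 6
# kg_v(4, 6) -> 12          # kgV is the least common multiple
# goldbach(28) -> [5, 23]   # first prime pair found that sums to 28
# fib(5) -> 8               # this fib is offset: fib(0) == fib(1) == 1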
---
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps one placeholder token to several learned vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
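
# Intended use in multi-vector textual inversion (hypothetical sketch):
#
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)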
---
from ..utils import DummyObject, requires_backends


# The dump strips identifiers; the class and method names below are reconstructed by
# analogy with diffusers' dummy-object modules for the `note_seq` backend.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
---
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self ):
snake_case = []
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_init_end''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_train_begin''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_train_end''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_epoch_begin''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_epoch_end''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_step_begin''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_step_end''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_evaluate''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_predict''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_save''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_log''' )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.events.append('''on_prediction_step''' )
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = tempfile.mkdtemp()
def a_ ( self ):
shutil.rmtree(self.output_dir )
def a_ ( self , __snake_case=0 , __snake_case=0 , __snake_case=6_4 , __snake_case=6_4 , __snake_case=None , __snake_case=False , **__snake_case ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case = RegressionDataset(length=__snake_case )
snake_case = RegressionDataset(length=__snake_case )
snake_case = RegressionModelConfig(a=__snake_case , b=__snake_case )
snake_case = RegressionPreTrainedModel(__snake_case )
snake_case = TrainingArguments(self.output_dir , disable_tqdm=__snake_case , report_to=[] , **__snake_case )
return Trainer(
__snake_case , __snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , callbacks=__snake_case , )
def a_ ( self , __snake_case , __snake_case ):
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
# Order doesn't matter
snake_case = sorted(__snake_case , key=lambda __snake_case : cb.__name__ if isinstance(__snake_case , __snake_case ) else cb.__class__.__name__ )
snake_case = sorted(__snake_case , key=lambda __snake_case : cb.__name__ if isinstance(__snake_case , __snake_case ) else cb.__class__.__name__ )
for cba, cba in zip(__snake_case , __snake_case ):
if isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
elif isinstance(__snake_case , __snake_case ) and not isinstance(__snake_case , __snake_case ):
self.assertEqual(__snake_case , cba.__class__ )
elif not isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case ):
self.assertEqual(cba.__class__ , __snake_case )
else:
self.assertEqual(__snake_case , __snake_case )
def a_ ( self , __snake_case ):
snake_case = ['''on_init_end''', '''on_train_begin''']
snake_case = 0
snake_case = len(trainer.get_eval_dataloader() )
snake_case = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(__snake_case ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def a_ ( self ):
snake_case = self.get_trainer()
snake_case = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
# Callbacks passed at init are added to the default callbacks
snake_case = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case = self.get_trainer(disable_tqdm=__snake_case )
snake_case = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
def a_ ( self ):
snake_case = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__snake_case )
expected_callbacks.remove(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
snake_case = self.get_trainer()
snake_case = trainer.pop_callback(__snake_case )
self.assertEqual(cb.__class__ , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
trainer.add_callback(__snake_case )
expected_callbacks.insert(0 , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
# We can also add, pop, or remove by instance
snake_case = self.get_trainer()
snake_case = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__snake_case )
expected_callbacks.remove(__snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
snake_case = self.get_trainer()
snake_case = trainer.callback_handler.callbacks[0]
snake_case = trainer.pop_callback(__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
trainer.add_callback(__snake_case )
expected_callbacks.insert(0 , __snake_case )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __snake_case )
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
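# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the event-flow
# tests above assume a callback that records every Trainer event it receives.
# A minimal version of such a recorder could look like the following; the
# class name `EventRecorderCallback` is a hypothetical placeholder.
#
#     from transformers import TrainerCallback
#
#     class EventRecorderCallback(TrainerCallback):
#         def __init__(self):
#             self.events = []
#
#         def on_init_end(self, args, state, control, **kwargs):
#             self.events.append("on_init_end")
#
#         def on_step_begin(self, args, state, control, **kwargs):
#             self.events.append("on_step_begin")
#         # ... one such hook per event fired by the Trainer.
# ---------------------------------------------------------------------------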
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
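# Illustrative usage (a sketch, assuming the standard dummy-object behavior):
# instantiating any of the classes above without the `flax` and `transformers`
# backends installed raises an ImportError that names the missing backends:
#
#     pipe = FlaxStableDiffusionPipeline()  # ImportError: ... requires the flax library ...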
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
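# Illustrative usage (a sketch; the artifact name and environment variable are
# hypothetical):
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["ci_results"], output_dir="ci_artifacts", token=os.environ.get("GITHUB_TOKEN")
#     )
#     # `reports` maps artifact name -> {filename -> decoded file content}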
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
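# Quick sanity check (illustrative, not part of the original module): with the
# defaults above, a 224x224 image cut into 16x16 patches yields
# (224 // 16) ** 2 = 196 patch tokens, plus one [CLS] token.
#
#     config = ViTConfig()
#     assert (config.image_size // config.patch_size) ** 2 == 196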
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
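# Note (illustrative): `debug_launcher` runs the target function in spawned
# CPU processes, so the same `test_metrics.main` body is exercised in both
# single-process and multi-process setups without needing a GPU, e.g.
#
#     debug_launcher(self.test_metrics.main, num_processes=2)  # hypothetical call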
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
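# ---------------------------------------------------------------------------
# Note (illustrative): `create_and_check_decoder_model_past_large_inputs`
# verifies KV-cache correctness. Decoding the same continuation with and
# without `past_key_values` must produce numerically matching hidden states,
# which is why a random slice of both outputs is compared with
# `torch.allclose(..., atol=1e-3)`.
# ---------------------------------------------------------------------------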
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
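# Note (illustrative sketch): the dense retriever above relies on
# max-inner-product search. With faiss this is just an `IndexFlatIP` over the
# passage embeddings; the shapes below are hypothetical.
#
#     import faiss
#     import numpy as np
#
#     passage_reps = np.random.rand(1000, 128).astype("float32")
#     index = faiss.IndexFlatIP(128)
#     index.add(passage_reps)
#     scores, ids = index.search(np.random.rand(1, 128).astype("float32"), 10)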
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))


disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
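# Illustrative usage (a sketch): the compiled extension is imported lazily so
# that CPU-only installs never trigger a CUDA build.
#
#     MSDA = load_cuda_kernels()  # compiles on first call, then returns the module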
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a maze via backtracking and print the solution path, if any."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive helper: try to extend the path from cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
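# Example (illustrative): 0 marks a free cell, 1 marks a wall.
#
#     maze = [
#         [0, 1, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#         [0, 0, 1, 0, 0],
#         [1, 0, 0, 1, 0],
#     ]
#     solve_maze(maze)  # prints the 0/1 solution grid if a path exists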
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module and adds a small low-rank adapter on top of it (testing helper)."""

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model

        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
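# Note (illustrative arithmetic): `EXPECTED_RELATIVE_DIFFERENCE` is the ratio
# of the fp16 memory footprint to the 4-bit one. A pure 16-bit -> 4-bit weight
# compression would give a ratio of 4; the measured ~2.1 (bloom-1b7) and ~3.3
# (gpt2-xl) reflect layers kept in higher precision (e.g. `lm_head`) and
# quantization overhead.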
import os
def solution(filename: str = "input.txt") -> int:
    """Find the minimal left-column-to-right-column path sum (moving up, down
    and right) through the matrix stored in `filename` (Project Euler 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            # coming from the left
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            # coming from above
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            # coming from below
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
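# Worked example (illustrative): for the 5x5 matrix from the Project Euler 82
# statement, the three relaxation passes above (right, down, up) yield a
# minimal left-to-right path sum of 994 (201 -> 96 -> 342 -> 234 -> 103 -> 18).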
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
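# Note (illustrative): `Tracker` registers a forward hook on every submodule
# and keeps only the leaves (no submodules) plus conv/batch-norm layers, in
# execution order. For example (hypothetical shapes):
#
#     net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
#     traced = Tracker(net)(torch.randn(1, 3, 8, 8)).traced
#     # traced == [Conv2d(...), BatchNorm2d(...), ReLU()]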
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
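# Downloads a VISSL checkpoint to the cache dir and loads its trunk weights into a freshly
# built model; returns the model in eval mode together with the checkpoint's classification heads.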
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
SCREAMING_SNAKE_CASE__ : Tuple = model_func()
# the checkpoint stores the trunk and the heads separately: load the trunk, return the heads
SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk']
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : int = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 636
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase_ ( _snake_case ):
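# Freeze helper: every parameter of the module has its requires_grad flag set to False.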
for param in module.parameters():
SCREAMING_SNAKE_CASE__ : int = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE__ : List[Any] = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = datetime.now()
SCREAMING_SNAKE_CASE__ : Tuple = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 223
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase__ : Optional[Any] = 2_5_0_0_0_4
UpperCAmelCase__ : Union[str, Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = MBartTokenizer
__UpperCamelCase : Any = MBartTokenizerFast
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : int = True
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Dict = MBartTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = MBartTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE__ : str = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saves with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE__ : Dict = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : str = '''facebook/mbart-large-en-ro'''
__UpperCamelCase : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__UpperCamelCase : Optional[Any] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__UpperCamelCase : Optional[int] = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def __magic_name__ (cls ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
SCREAMING_SNAKE_CASE__ : List[Any] = 1
return cls
def __magic_name__ (self ) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> str:
"""simple docstring"""
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ )
@require_torch
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Dict = targets["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 223
| 1
|
import torch
from transformers import AutoModel
class a (torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : List[Any]="sayef/fsner-bert-base-uncased" ) -> List[Any]:
super(__a , self ).__init__()
__snake_case : Union[str, Any] = AutoModel.from_pretrained(__a , return_dict=__a )
__snake_case : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-08 )
__snake_case : Optional[Any] = torch.nn.Softmax(dim=1 )
def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.bert(**__a ).last_hidden_state
def __snake_case ( self : Union[str, Any] , lowerCamelCase : Any ) -> Union[str, Any]:
return token_embeddings.sum(2 , keepdim=__a )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : int=1 ) -> str:
return self.softmax(T * self.cos(__a , __a ) )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> Optional[int]:
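# Few-shot NER forward pass: encode the query and support batches with BERT, then score every
# query token against the support set's entity start/end marker embeddings.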
__snake_case : str = W_supports["sizes"].tolist()
__snake_case : Union[str, Any] = W_supports["start_token_id"].item()
__snake_case : Tuple = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__snake_case : List[str] = self.BERT(**__a )
__snake_case : int = self.BERT(**__a )
__snake_case : Dict = None
__snake_case : int = None
__snake_case : Tuple = W_supports["input_ids"] == start_token_id
__snake_case : Optional[Any] = W_supports["input_ids"] == end_token_id
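# Iterate over the support sets; `sizes` tells how many support examples belong to each query.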
for i, size in enumerate(__a ):
if i == 0:
__snake_case : Dict = 0
else:
__snake_case : Tuple = support_sizes[i - 1]
__snake_case : Dict = S[s : s + size][start_token_masks[s : s + size]]
__snake_case : str = S[s : s + size][end_token_masks[s : s + size]]
__snake_case : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__snake_case : str = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__snake_case : Any = torch.vstack((p_starts, p_start) )
__snake_case : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
__snake_case : List[str] = p_start
__snake_case : Dict = p_end
return p_starts, p_ends
| 707
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_snake_case : Any = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ):
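# Sets `tensor_name` inside a (possibly nested) `module` to `value` on `device`, re-wrapping
# bitsandbytes 4-bit/8-bit parameters so they are quantized when moved off the meta device.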
# Recurse if needed
if "." in tensor_name:
__snake_case : Tuple = tensor_name.split("." )
for split in splits[:-1]:
__snake_case : List[str] = getattr(__lowerCamelCase , __lowerCamelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
__snake_case : Optional[int] = new_module
__snake_case : Union[str, Any] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
__snake_case : Optional[int] = tensor_name in module._buffers
__snake_case : List[str] = getattr(__lowerCamelCase , __lowerCamelCase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
__snake_case : List[Any] = False
__snake_case : Optional[int] = False
if is_buffer or not is_bitsandbytes_available():
__snake_case : Dict = False
__snake_case : Optional[int] = False
else:
__snake_case : Any = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
__snake_case : Union[str, Any] = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
if is_8bit or is_4bit:
__snake_case : Union[str, Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__snake_case : Any = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
__snake_case : int = value.to("cpu" )
if value.dtype == torch.inta:
__snake_case : Optional[int] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
__snake_case : Optional[int] = torch.tensor(__lowerCamelCase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCamelCase ) and fpaa_statistics is None:
__snake_case : List[Any] = new_value.T
__snake_case : Tuple = old_value.__dict__
if is_8bit:
__snake_case : List[str] = bnb.nn.Int8Params(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
elif is_4bit:
__snake_case : Optional[int] = bnb.nn.Params4bit(__lowerCamelCase , requires_grad=__lowerCamelCase , **__lowerCamelCase ).to(__lowerCamelCase )
__snake_case : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(__lowerCamelCase ) )
else:
if value is None:
__snake_case : Tuple = old_value.to(__lowerCamelCase )
elif isinstance(__lowerCamelCase , torch.Tensor ):
__snake_case : Dict = value.to(__lowerCamelCase )
else:
__snake_case : List[Any] = torch.tensor(__lowerCamelCase , device=__lowerCamelCase )
if is_buffer:
__snake_case : Optional[Any] = new_value
else:
__snake_case : int = nn.Parameter(__lowerCamelCase , requires_grad=old_value.requires_grad )
__snake_case : Tuple = new_value
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False ):
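# Recursively walks the model and swaps every eligible nn.Linear (or Conv1D) for the matching
# bitsandbytes quantized layer, skipping anything listed in `modules_to_not_convert`.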
for name, module in model.named_children():
if current_key_name is None:
__snake_case : List[Any] = []
current_key_name.append(__lowerCamelCase )
if (isinstance(__lowerCamelCase , nn.Linear ) or isinstance(__lowerCamelCase , __lowerCamelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(__lowerCamelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case , __snake_case : Union[str, Any] = module.weight.shape
else:
__snake_case : List[Any] = module.in_features
__snake_case : Tuple = module.out_features
if quantization_config.quantization_method() == "llm_int8":
__snake_case : Tuple = bnb.nn.LinearabitLt(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
__snake_case : List[str] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
__snake_case : Optional[int] = bnb.nn.Linearabit(
__lowerCamelCase , __lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
__snake_case : int = True
# Store the module class in case we need to transpose the weight later
__snake_case : str = type(__lowerCamelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCamelCase )
if len(list(module.children() ) ) > 0:
__snake_case , __snake_case : Tuple = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , has_been_replaced=__lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None ):
__snake_case : List[str] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
__snake_case , __snake_case : Any = _replace_with_bnb_linear(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , __lowerCamelCase , )
return replace_with_bnb_linear(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase_ ( *__lowerCamelCase , **__lowerCamelCase ):
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , __lowerCamelCase , )
return set_module_quantized_tensor_to_device(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
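# Collects the module names that must stay in full precision: tied weights and the output head.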
__snake_case : Optional[int] = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
__snake_case : Any = find_tied_parameters(__lowerCamelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case : Dict = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__snake_case : Any = sum(__lowerCamelCase , [] )
__snake_case : Optional[int] = len(__lowerCamelCase ) > 0
# Check if it is a base model
__snake_case : Dict = not hasattr(__lowerCamelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__snake_case : Optional[int] = list(model.named_children() )
__snake_case : List[str] = [list_modules[-1][0]]
# add last module together with tied weights
__snake_case : int = set(__lowerCamelCase ) - set(__lowerCamelCase )
__snake_case : Optional[Any] = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase )
# remove ".weight" from the keys
__snake_case : Optional[Any] = [".weight", ".bias"]
__snake_case : Any = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__snake_case : Optional[int] = name.replace(__lowerCamelCase , "" )
filtered_module_names.append(__lowerCamelCase )
return filtered_module_names
| 203
| 0
|
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
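# Session-scoped fixture: a 10-row toy dataset (tokens, labels, QA-style answers, id) reused
# by the file-format fixtures below.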
_UpperCAmelCase = 1_0
_UpperCAmelCase = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_UpperCAmelCase = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(a__ ) ),
} , features=a__ , )
return dataset
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple , a__: List[str] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=a__ )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ :str = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt'
_UpperCAmelCase = FILE_CONTENT
with open(a__ , 'w' ) as f:
f.write(a__ )
return filename
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
import bza
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_UpperCAmelCase = bytes(a__ , 'utf-8' )
with bza.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_UpperCAmelCase = bytes(a__ , 'utf-8' )
with gzip.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: int ) -> Any:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_UpperCAmelCase = bytes(a__ , 'utf-8' )
with lza.frame.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[Any] , a__: Any ) -> List[Any]:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(a__ , 'w' ) as archive:
archive.write(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[Any] , a__: Any ) -> str:
'''simple docstring'''
import tarfile
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(a__ , 'w' ) as f:
f.add(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Any:
'''simple docstring'''
import lzma
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_UpperCAmelCase = bytes(a__ , 'utf-8' )
with lzma.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] , a__: Any ) -> List[str]:
'''simple docstring'''
import zipfile
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any ) -> List[str]:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_UpperCAmelCase = bytes(a__ , 'utf-8' )
with zstd.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.xml'
_UpperCAmelCase = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(a__ , 'w' ) as f:
f.write(a__ )
return filename
lowerCAmelCase__ :Tuple = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowerCAmelCase__ :List[Any] = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowerCAmelCase__ :List[Any] = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ :int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowerCAmelCase__ :Tuple = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = datasets.Dataset.from_dict(a__ )
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(a__ ) ) as con:
_UpperCAmelCase = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(a__ , 'w' , newline='' ) as f:
_UpperCAmelCase = csv.DictWriter(a__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] ) -> str:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(a__ , 'w' , newline='' ) as f:
_UpperCAmelCase = csv.DictWriter(a__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple , a__: List[str] ) -> Dict:
'''simple docstring'''
import bza
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(a__ , 'rb' ) as f:
_UpperCAmelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a__ , 'wb' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(a__ ) )
f.write(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict , a__: List[Any] , a__: Tuple ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(a__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: List[str] ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_UpperCAmelCase = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(a__ , 'wb' ) as f:
_UpperCAmelCase = pq.ParquetWriter(a__ , schema=a__ )
_UpperCAmelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a__ ) )] for k in DATA[0]} , schema=a__ )
writer.write_table(a__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_UpperCAmelCase = {'data': DATA}
with open(a__ , 'w' ) as f:
json.dump(a__ , a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_UpperCAmelCase = {'data': DATA_DICT_OF_LISTS}
with open(a__ , 'w' ) as f:
json.dump(a__ , a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(a__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(a__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(a__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(a__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any ) -> int:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(a__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(a__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(a__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(a__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] , a__: str ) -> Union[str, Any]:
'''simple docstring'''
import gzip
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(a__ , 'rb' ) as orig_file:
with gzip.open(a__ , 'wb' ) as zipped_file:
zipped_file.writelines(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] , a__: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
import gzip
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(a__ , 'rb' ) as orig_file:
with gzip.open(a__ , 'wb' ) as zipped_file:
zipped_file.writelines(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] , a__: Dict , a__: Tuple ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(a__ ) )
f.write(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] , a__: Optional[int] , a__: List[Any] , a__: int ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.join('nested' , os.path.basename(a__ ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[str] , a__: Optional[Any] , a__: List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: List[str] , a__: List[str] , a__: str ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(a__ , 'w' ) as f:
f.add(a__ , arcname=os.path.basename(a__ ) )
f.add(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[Any] , a__: Optional[int] , a__: List[str] , a__: Tuple ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(a__ , 'w' ) as f:
f.add(a__ , arcname=os.path.join('nested' , os.path.basename(a__ ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = ['0', '1', '2', '3']
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(a__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = ['0', '1', '2', '3']
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(a__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: str ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ['0', '1', '2', '3']
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(a__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: int , a__: Tuple , a__: Dict ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(a__ ) )
f.write(a__ , arcname=os.path.basename(a__ ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Any , a__: int , a__: Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
f.write(a__ , arcname=os.path.join('main_dir' , os.path.basename(a__ ) ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Tuple , a__: List[str] , a__: Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(a__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_UpperCAmelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(a__ )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: Union[str, Any] , a__: str ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(a__ , 'w' ) as f:
f.write(a__ , arcname=os.path.basename(a__ ) )
f.write(a__ , arcname=os.path.basename(a__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowerCAmelCase__ ( a__: int ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 618
|
import math
import qiskit
def lowerCAmelCase__ ( a__: int = 1 , a__: int = 1 , a__: int = 1 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
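# Quantum full adder: inputs 0/1 set a qubit directly, while input 2 puts it in superposition
# via a Hadamard gate; the sum and carry-out are measured on the last two qubits.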
if (
isinstance(a__ , a__ )
or isinstance(a__ , a__ )
or isinstance(a__ , a__ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(a__ ) != input_a)
or (math.floor(a__ ) != input_a)
or (math.floor(a__ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
_UpperCAmelCase = qiskit.QuantumRegister(4 , 'qr' )
_UpperCAmelCase = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
_UpperCAmelCase = [input_a, input_a, carry_in]
_UpperCAmelCase = qiskit.QuantumCircuit(a__ , a__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(a__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(a__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(a__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , a__ ) # measure the last two qbits
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
_UpperCAmelCase = qiskit.execute(a__ , a__ , shots=1_0_0_0 )
return job.result().get_counts(a__ )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 618
| 1
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]="" , SCREAMING_SNAKE_CASE__ : Dict="train" ):
assert os.path.isdir(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = []
lowerCamelCase__ = os.listdir(SCREAMING_SNAKE_CASE__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not os.path.isfile(SCREAMING_SNAKE_CASE__ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE__ )
def __len__( self : Union[str, Any] ):
return len(self.documents )
def __getitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
lowerCamelCase__ = self.documents[idx]
lowerCamelCase__ = document_path.split('/' )[-1]
with open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as source:
lowerCamelCase__ = source.read()
lowerCamelCase__ , lowerCamelCase__ = process_story(SCREAMING_SNAKE_CASE__ )
return document_name, story_lines, summary_lines
def snake_case ( _a: List[Any] )-> int:
'''simple docstring'''
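# Splits a raw CNN/DailyMail-style story file into article lines and "@highlight" summary lines.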
lowerCamelCase__ = list(filter(lambda _a : len(_a ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCamelCase__ = [_add_missing_period(_a ) for line in nonempty_lines]
# gather article lines
lowerCamelCase__ = []
lowerCamelCase__ = deque(_a )
while True:
try:
lowerCamelCase__ = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_a )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCamelCase__ = list(filter(lambda _a : not t.startswith('@highlight' ) , _a ) )
return story_lines, summary_lines
def snake_case ( _a: Tuple )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case ( _a: Optional[int] , _a: int , _a: Optional[Any] )-> str:
'''simple docstring'''
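# Truncates the sequence to `block_size`, or right-pads it with `pad_token_id` up to that length.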
if len(_a ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_a )) )
return sequence
def snake_case ( _a: str , _a: List[Any] )-> int:
'''simple docstring'''
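# Builds an attention mask: ones everywhere, zeros at positions equal to the pad token id.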
lowerCamelCase__ = torch.ones_like(_a )
lowerCamelCase__ = sequence == pad_token_id
lowerCamelCase__ = 0
return mask
def snake_case ( _a: Any , _a: Tuple , _a: Optional[int] )-> int:
'''simple docstring'''
lowerCamelCase__ = [tokenizer.encode(_a ) for line in story_lines]
lowerCamelCase__ = [token for sentence in story_lines_token_ids for token in sentence]
lowerCamelCase__ = [tokenizer.encode(_a ) for line in summary_lines]
lowerCamelCase__ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case ( _a: int , _a: str )-> List[str]:
'''simple docstring'''
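# Token type ids for BertSum-style models: segments alternate between 0 and 1, flipping at
# every separator token.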
lowerCamelCase__ = []
for sequence in batch:
lowerCamelCase__ = -1
lowerCamelCase__ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_a )
return torch.tensor(_a )
| 721
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
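# Hash map with open addressing; deleted slots are marked with the `_deleted` sentinel so that
# probe chains stay intact.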
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
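# Linear probing: yield bucket indices starting at the key's hash position, wrapping around.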
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ', '.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
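

# Usage sketch (not part of the original module): exercises the HashMap above.
# Ten insertions exceed the 0.75 load factor of the initial 8 buckets, so the
# table resizes; deletion leaves a `_deleted` tombstone so probe chains survive.
if __name__ == "__main__":
    hm = HashMap()
    for i in range(10):
        hm[f"key{i}"] = i
    hm["key0"] = 100  # overwrite in place, length unchanged
    del hm["key1"]  # replaced by the tombstone sentinel
    assert len(hm) == 9 and hm["key0"] == 100
    print(hm)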
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """
        Forwards the `images` argument to the image processor and the `audio` argument to the feature extractor,
        then merges both outputs into a single dict of model inputs.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
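

# Usage sketch (not part of the original file); the inputs below are
# illustrative assumptions and may need adjusting for a real TVLT checkpoint.
# from transformers import TvltFeatureExtractor, TvltImageProcessor
#
# processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# # `inputs` merges the image-processor and feature-extractor outputs,
# # e.g. "pixel_values"/"pixel_mask" alongside "audio_values"/"audio_mask".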
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
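

# Usage sketch (not part of the test file): the pipeline under test, invoked
# directly with the same tiny checkpoint the test uses.
# from transformers import pipeline
#
# classifier = pipeline(
#     "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
# )
# predictions = classifier("archery.mp4", top_k=2)  # [{"score": ..., "label": ...}, ...]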
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
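

# Behaviour sketch (not part of the original file): constructing the deprecated
# class still works but emits a FutureWarning pointing at FlavaImageProcessor.
# import warnings
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     extractor = FlavaFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)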
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Build the parent mapping with a breadth-first traversal from the source."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to `target_vertex` as 'A->B->...'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
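

# Optional iterative variant (not in the original): reconstructs the same path
# from the parent map without recursion, useful for very deep BFS trees.
def shortest_path_iterative(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))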
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # demonstrates the ValueError for unreachable vertices
class SubArray:
    def __init__(self, arr: str) -> None:
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
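

# For comparison (not in the original): the same maximum-subarray sum in O(n)
# time and O(1) space with Kadane's algorithm, instead of the two DP tables above.
def kadane(numbers: list[int]) -> int:
    best = current = numbers[0]
    for value in numbers[1:]:
        current = max(value, current + value)
        best = max(best, current)
    return best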
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file.

    Stores either the raw bytes of the audio file or a path to it, and decodes
    on access into a dict with "path", "array" and "sampling_rate" keys.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
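

# Usage sketch (not part of the original module): casting a column to Audio and
# reading one decoded sample; the file path is illustrative and decoding needs
# librosa/soundfile installed.
# from datasets import Audio, Dataset
#
# ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
# sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}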
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
_lowerCamelCase = 2_0
_lowerCamelCase = model_class_name(snake_case__ )
_lowerCamelCase = model.encode(inputs_dict['input_ids'] )
_lowerCamelCase , _lowerCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
_lowerCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , )
_lowerCamelCase = model.decode(snake_case__ , snake_case__ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
_lowerCamelCase = 2_0
_lowerCamelCase = model_class_name(snake_case__ )
_lowerCamelCase = model.encode(inputs_dict['input_ids'] )
_lowerCamelCase , _lowerCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
_lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCamelCase = model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ )
_lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def _snake_case ( self : Tuple ) -> Tuple:
_lowerCamelCase = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowerCamelCase = input_ids.shape[0]
_lowerCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _snake_case ( self : Union[str, Any] ) -> str:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = self._get_config_and_data()
_lowerCamelCase = FlaxBlenderbotSmallForConditionalGeneration(snake_case__ )
_lowerCamelCase = lm_model(input_ids=snake_case__ )
_lowerCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , snake_case__ )
def _snake_case ( self : Dict ) -> str:
_lowerCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowerCamelCase = FlaxBlenderbotSmallForConditionalGeneration(snake_case__ )
_lowerCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowerCamelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowerCamelCase = lm_model(input_ids=snake_case__ , decoder_input_ids=snake_case__ )
_lowerCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , snake_case__ )
def _snake_case ( self : int ) -> Union[str, Any]:
_lowerCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowerCamelCase = shift_tokens_right(snake_case__ , 1 , 2 )
_lowerCamelCase = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum()
_lowerCamelCase = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(snake_case__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def _snake_case ( self : str ) -> Any:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ )
def _snake_case ( self : str ) -> Any:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ )
def _snake_case ( self : Union[str, Any] ) -> Any:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(snake_case__ , snake_case__ )
_lowerCamelCase = model_class(snake_case__ )
@jax.jit
def encode_jitted(snake_case__ : str , snake_case__ : Any=None , **snake_case__ : List[str] ):
return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ )
with self.subTest('JIT Enabled' ):
_lowerCamelCase = encode_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCamelCase = encode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[str] ) -> int:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowerCamelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case__ : str , snake_case__ : str , snake_case__ : Tuple ):
return model.decode(
decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , )
with self.subTest('JIT Enabled' ):
_lowerCamelCase = decode_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCamelCase = decode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : Tuple ) -> List[str]:
for model_class_name in self.all_model_classes:
_lowerCamelCase = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCamelCase = np.ones((1, 1) ) * model.config.eos_token_id
_lowerCamelCase = model(snake_case__ )
self.assertIsNotNone(snake_case__ )
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
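

# Example invocation (not part of the original script); all paths are illustrative:
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5-pytorch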
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
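

# Usage sketch (not part of the original file): inside `transformers`, the
# attribute_map makes the generic name an alias for `context_length`.
# from transformers import RwkvConfig
#
# config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
# assert config.max_position_embeddings == config.context_length == 1024
# assert config.intermediate_size == 4 * 512  # defaults to 4 * hidden_size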
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation with the variance-exploding
    stochastic differential equation (SDE-VE) sampler.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
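

# Usage sketch (not part of the original file): sampling from a pretrained
# score-SDE checkpoint ("google/ncsnpp-church-256" is one published example).
# from diffusers import ScoreSdeVePipeline
#
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]
# image.save("sde_ve_sample.png")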
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1], tailored to variance-expanding (VE) models [2].
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
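

# Usage sketch (not part of the original file); the checkpoint id is an
# illustrative assumption, any UNet2DModel + KarrasVeScheduler pair works.
# from diffusers import KarrasVePipeline
#
# pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]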
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = LlamaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Any:
_a = True
_a = LlamaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
_a = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
_a = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Tuple:
_a = LlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = True
_a = True
_a = LlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
_a = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )['hidden_states'][0]
_a = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['hidden_states'][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['input_ids']
_a = input_ids.ne(1 ).to(snake_case_ )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = 'single_label_classification'
_a = input_dict['input_ids']
_a = input_ids.ne(1 ).to(snake_case_ )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = 'multi_label_classification'
_a = input_dict['input_ids']
_a = input_ids.ne(1 ).to(snake_case_ )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 1_0] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_a = LlamaModel(snake_case_ )
original_model.to(snake_case_ )
original_model.eval()
_a = original_model(snake_case_ ).last_hidden_state
_a = original_model(snake_case_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_a = {'type': scaling_type, 'factor': 10.0}
_a = LlamaModel(snake_case_ )
scaled_model.to(snake_case_ )
scaled_model.eval()
_a = scaled_model(snake_case_ ).last_hidden_state
_a = scaled_model(snake_case_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
_a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_a = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowerCAmelCase ( self ) -> List[str]:
_a = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
_a = model(torch.tensor(snake_case_ ) )
# Expected mean on dim = -1
_a = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_a = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
_a = model(torch.tensor(snake_case_ ) )
# Expected mean on dim = -1
_a = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_a = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test"
    )
@slow
def __lowerCAmelCase ( self ) -> Any:
_a = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
_a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
_a = model(torch.tensor(snake_case_ ) )
_a = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_a = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Model is currently gated")
@slow
def __lowerCAmelCase ( self ) -> str:
_a = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_a = 'Simply put, the theory of relativity states that '
_a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
_a = tokenizer.encode(snake_case_ , return_tensors="pt" )
_a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=snake_case_ )
# greedy generation outputs
_a = model.generate(snake_case_ , max_new_tokens=6_4 , top_p=snake_case_ , temperature=1 , do_sample=snake_case_ )
_a = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G), e.g. dna("GCTA") == "CGAT"."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
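

# Related sketch (not in the original): the reverse complement, read 3'->5',
# which is what most bioinformatics tooling means by the complementary strand.
def reverse_complement(strand: str) -> str:
    return dna(strand)[::-1]  # e.g. reverse_complement("GCTA") == "TAGC"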
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
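# Example of rename_key applied to two typical checkpoint keys (illustrative
# inputs; the outputs follow mechanically from the replace rules above):
#
#     rename_key("transformer.resblocks.0.ln_1.weight")
#     # -> "text_model.encoder.layers.0.layer_norm1.weight"
#     rename_key("visual.ln_post.bias")
#     # -> "vision_model.post_layernorm.bias"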
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
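# The in_proj handling above assumes the original checkpoint stores the query, key
# and value projections stacked along dim 0, which is why each weight is cut into
# three equal slices. A self-contained sketch with a toy tensor (hypothetical dim):
#
#     import torch
#     dim = 4
#     in_proj_weight = torch.randn(3 * dim, dim)
#     q = in_proj_weight[:dim, :]
#     k = in_proj_weight[dim : dim * 2, :]
#     v = in_proj_weight[-dim:, :]
#     assert q.shape == k.shape == v.shape == (dim, dim)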
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
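# Example invocation of this conversion script (the output path and script filename
# are illustrative; the flags come from the argparse setup above, and the model name
# must be a key of model_to_url):
#
#     python convert_xclip_checkpoint.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path /tmp/xclip-base-patch32 \
#         --push_to_hub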
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
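# With the lazy structure above, importing a symbol only loads its submodule on
# first attribute access. A minimal sketch of the same pattern for a hypothetical
# package (module and class names are placeholders):
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#     # `from package import FooConfig` now defers importing configuration_foo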
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
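# For reference, a minimal standalone use of the pipeline exercised above (hedged:
# requires the pytesseract/vision extras, and "impira/layoutlm-document-qa" is one
# of the checkpoints used in these tests; the image path is illustrative):
#
#     from transformers import pipeline
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image="invoice.png", question="What is the invoice number?", top_k=1)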
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
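# A short usage sketch (hypothetical dataset columns, as in CNN/DailyMail; the
# column_mapping property above drives the rename):
#
#     task = Summarization(text_column="article", summary_column="highlights")
#     task.column_mapping  # -> {"article": "text", "highlights": "summary"}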
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
    tgt = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
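# Hedged sketch of how calculate_rouge (from the local utils module) is called: it
# takes parallel lists of predictions and references and returns one entry per
# requested rouge key, e.g.:
#
#     scores = calculate_rouge(["the cat sat down"], ["the cat sat"], rouge_keys=["rouge2"])
#     # -> {"rouge2": <aggregated score>} when bootstrap aggregation is enabled (the default)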
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
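# The --correct_filename input holds one semicolon-separated record per line,
# matching the split(";") in main() above. A hypothetical example record:
#
#     tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])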
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
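# Worked examples for evaluate_postfix above ("2 1 + 3 *" is (2 + 1) * 3, and the
# sign check makes division truncate toward zero):
#
#     evaluate_postfix(["2", "1", "+", "3", "*"])             # -> 9
#     evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"])  # -> 3, i.e. 15 / (7 - (1 + 1))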
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
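# Hedged usage sketch: with the defaults above, this fetches run_prompt_template.txt
# from the huggingface-tools/default-prompts dataset repo (the agent name is
# arbitrary and only used for the user-agent header):
#
#     template = download_prompt(None, agent_name="my-agent", mode="run")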
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}


class VanConfig(PretrainedConfig):
    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
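# Minimal sketch: instantiating the config with defaults and one override (the
# attribute names match the __init__ above):
#
#     config = VanConfig(hidden_sizes=[32, 64, 160, 256])
#     config.model_type  # -> "van"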
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCAmelCase ="""\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__UpperCAmelCase ="""\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__UpperCAmelCase ="""
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase ="""▁"""
__UpperCAmelCase =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowercase__ : str = BertGenerationTokenizer
lowercase__ : int = False
lowercase__ : Optional[Any] = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = A__
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        # A prediction counts as correct when it is mathematically
        # equivalent to its reference after canonicalization.
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 11
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 11
| 1
|
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # Labels are encoded in the file name, e.g. `beagle_32.jpg` -> "beagle".
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
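# Example launch (hypothetical data directory; the script expects a flat folder
# of `<label>_<index>.jpg` images, e.g. the Oxford-IIIT Pets dataset):
#
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch --with_tracking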
| 692
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two bit strings that differ in at most one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Reduce the minterm table pass by pass and collect the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm into its bit-string representation over `no_of_variable` bits."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant marks that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and zero out the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 when implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
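# Non-interactive sketch of the same pipeline (bypasses main()'s input() calls;
# minterms are floats here only because main() parses them with float()):
#
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0, 6.0, 7.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)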
| 257
| 0
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
a : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
a : List[Any] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
a : Any = BeautifulSoup(res.text, "html.parser")
a : int = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
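# Usage sketch (needs network access; hypothetical file name). The ".eZt8xd"
# CSS selector targets Google's current result markup and tends to break when
# Google changes its HTML:
#
#   python google_search.py "hello world"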
| 609
|
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)
class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 609
| 1
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
    def __repr__(self) -> str:
        return pformat(self.adj_list)
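# Minimal usage sketch (add_edge returns self, so calls can be chained):
#
#   graph = GraphAdjacencyList(directed=False)
#   graph.add_edge(1, 2).add_edge(2, 3)
#   print(graph)  # -> {1: [2], 2: [1, 3], 3: [2]}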
| 236
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
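# Usage sketch (assumes a local image file; PipelineTool instances are callable
# and chain encode -> forward -> decode under the hood):
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("photo.jpg"), "What color is the car?")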
| 236
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
def lowercase_ ( self :List[Any] ,__UpperCAmelCase :int ,__UpperCAmelCase :Any ,__UpperCAmelCase :Optional[int] ,__UpperCAmelCase :str ,__UpperCAmelCase :Union[str, Any] ,__UpperCAmelCase :Any ,__UpperCAmelCase :Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : List[str] = FalconModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCamelCase__ : Any = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_alibi(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV heads than query heads, so this
        # override checks the cache shapes against the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        expected_output = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, expected_output)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
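# To run the slow generation tests above locally (slow tests are opt-in in the
# transformers test suite; path assumes the usual repo layout):
#
#   RUN_SLOW=1 pytest tests/models/falcon/test_modeling_falcon.py -k FalconLanguageGenerationTest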
| 121
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each letter in the keyword (spaces allowed)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the substitution alphabet for the given keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher the message; characters without a mapping pass through unchanged."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher the message using the reversed cipher mapping."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
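# A quick round-trip sanity check for the cipher above (uses the functions
# defined in this file; the keyword and message are arbitrary examples):
cipher_map = create_cipher_map("Goodbye!!")
encoded = encipher("Hello World!!", cipher_map)
assert decipher(encoded, cipher_map) == "HELLO WORLD!!"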
| 121
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1e-4
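# A brief usage sketch for the config above (the values shown are its defaults):
from transformers import MobileNetV2Config

config = MobileNetV2Config()
assert config.depth_multiplier == 1.0 and config.image_size == 224
wide = MobileNetV2Config(depth_multiplier=1.4)  # mobilenet_v2_1.4_224-style width scaling
try:
    MobileNetV2Config(depth_multiplier=0)  # rejected by the check in __init__
except ValueError as err:
    print(err)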
| 566
|
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """simple docstring"""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """simple docstring"""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """simple docstring"""
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """simple docstring"""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """simple docstring"""
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """simple docstring"""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """simple docstring"""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
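# A short usage sketch for the namer above; the parameter names and defaults
# below are made up for illustration:
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-5, "batch_size": 8}

name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 8})
print(name)  # "run_lr0.0001" with these defaults: batch_size equals its default, so only the LR is encoded
assert RunNamer.parse_repr(name)["learning_rate"] == 1e-4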
| 566
| 1
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
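# The special-token layout produced by build_inputs_with_special_tokens above,
# sketched on raw ids so no vocab files are needed (ids are illustrative):
bos, eos = 0, 2
ids_0, ids_1 = [10, 11], [20]
single = [bos] + ids_0 + [eos]         # single sequence: <bos> x <eos>
pair = single + [eos] + ids_1 + [eos]  # pair: <bos> x <eos> <eos> y <eos>
assert pair == [0, 10, 11, 2, 2, 20, 2]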
| 84
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        """simple docstring"""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
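# A usage sketch for the pipeline above. "CompVis/ldm-super-resolution-4x-openimages"
# is the checkpoint this pipeline is commonly loaded from; treat the repo name as
# an assumption if it has moved on the Hub.
from io import BytesIO

import requests
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
low_res = Image.open(BytesIO(requests.get(url).content)).resize((128, 128))
# preprocess() above snaps both sides down to a multiple of 32 and rescales to [-1, 1]
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")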
| 84
| 1
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
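# The attribute_map above aliases legacy attribute names onto the canonical ones;
# a quick sketch using the config defined in this file:
config = ErnieMConfig()
config.num_classes = 3  # remapped to num_labels
assert config.num_labels == 3
config.dropout = 0.2    # remapped to classifier_dropout
assert config.classifier_dropout == 0.2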
| 668
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """simple docstring"""
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
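# The IGNORE_RESULT flag registered above lets a doctest run an expression without
# comparing its printed output. A hypothetical docstring using the directive:
def now_ms():
    """
    >>> now_ms()  # doctest: +IGNORE_RESULT
    1700000000000
    """
    import time

    return int(time.time() * 1000)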
| 668
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """simple docstring"""
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 707
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 215
| 0
|
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 207
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 391
| 0
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
):
    """simple docstring"""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size['height'], size['width']),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        '''simple docstring'''
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
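# A quick numeric sketch of get_resize_output_image_size above: with
# keep_aspect_ratio=True it scales as little as possible, then snaps both sides
# to the requested multiple. For a 480x640 input and a 384x384 target:
import numpy as np

image = np.zeros((3, 480, 640))  # channels-first dummy image
print(get_resize_output_image_size(image, output_size=384, keep_aspect_ratio=True, multiple=32))
# -> (384, 512): height fits exactly, width follows the same 0.8 scale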
| 319
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        audio_classifier = pipeline(
            task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused')
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}],
        )

    @unittest.skip('No models are available in TF')
    def test_small_model_tf(self):
        '''simple docstring'''
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        '''simple docstring'''
        audio_classifier = pipeline(
            task='zero-shot-audio-classification',
            model='laion/clap-htsat-unfused',
        )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )

    @unittest.skip('No models are available in TF')
    def test_large_model_tf(self):
        '''simple docstring'''
        pass
| 319
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self):
        """simple docstring"""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        """simple docstring"""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
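# A note on the backwards-compatibility shim above: pos_att_type may be passed
# as a pipe-separated string and is normalized to a list (sketch uses the class
# defined in this file):
config = DebertaV2Config(pos_att_type="p2c|c2p")
assert config.pos_att_type == ["p2c", "c2p"]
assert config.relative_attention is False  # other defaults are untouched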
| 69
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """simple docstring"""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
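# A small sketch of the key-renaming helper above, on a made-up checkpoint key:
renamed = replace_key_with_offset(
    "poolformer.encoder.2.0.mlp.fc1.weight", offset=1, original_name="mlp.fc1", new_name="output.conv1"
)
assert renamed == "poolformer.encoder.block.1.0.output.conv1.weight"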
| 516
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :str=3 , lowerCamelCase_ :Optional[Any]=30 , lowerCamelCase_ :Dict=4_00 , lowerCamelCase_ :Any=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ :List[str]=[0.5, 0.5, 0.5] , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Tuple=1 / 2_55 , lowerCamelCase_ :Any=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE : List[Any] = max_resolution
SCREAMING_SNAKE_CASE : List[Any] = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : Dict = image_mean
SCREAMING_SNAKE_CASE : List[str] = image_std
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_pad
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
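# --- Editor's note: a minimal, self-contained sketch of the shortest-edge resize rule
# that get_expected_values above relies on. `shortest_edge_resize` is an illustrative
# helper, not a transformers API; the real processor additionally caps the longer side
# at size["longest_edge"], which is omitted here.
def shortest_edge_resize(width, height, shortest_edge):
    """Scale (width, height) so the shorter side equals `shortest_edge`."""
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 300, 18) == (24, 18)  # landscape: height shrinks to 18
assert shortest_edge_resize(300, 400, 18) == (18, 24)  # portrait: width shrinks to 18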
@require_torch
@require_vision
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        '''simple docstring'''
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''image_id''': 3_97_69, '''annotations''': target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
        encoding = image_processing(images=image , annotations=target , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        '''simple docstring'''
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            target = json.loads(f.read() )
        target = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
        masks_path = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        image_processing = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='''pt''' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['''pixel_values'''].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , expected_size ) )
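# --- Editor's note: a hedged sketch of the COCO-style detection target the slow tests
# above build; the field values here are illustrative, not taken from the fixture file.
example_target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [100.0, 50.0, 80.0, 120.0], "category_id": 75, "area": 9600.0, "iscrowd": 0},
    ],
}
# The image processor converts each absolute [x, y, width, height] box into a normalized
# [center_x, center_y, width, height] box, which is why the expected box values above
# all lie in [0, 1].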
| 701
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
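# --- Editor's note: a minimal usage sketch of the max-over-references pattern above,
# with a toy metric; `exact_match` here is illustrative, not the imported fa/EM scorer.
def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())

# A prediction only has to match its best reference, not every reference:
assert max(exact_match("Paris", gt) for gt in ["paris", "Paris, France"]) == 1.0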
def get_scores( args , preds_path , gold_data_path ):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F"F1: {fa:.2f}" )
    logger.info(F"EM: {em:.2f}" )
def get_precision_at_k( args , preds_path , gold_data_path ):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval( args , rag_model , questions ):
    '''simple docstring'''
    def strip_title( title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_eae( args , rag_model , questions ):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the checkpoint step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def main( args ):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
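# --- Editor's note: a hedged example invocation of the evaluation script above; the
# script file name and data paths are illustrative and must match your local layout,
# while facebook/rag-token-nq is a real checkpoint on the Hub.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5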
| 18
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Union[str, Any] = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__ : Union[str, Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
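# --- Editor's note: a minimal sketch of the lazy-import pattern this __init__ relies
# on; `TinyLazyModule` is a simplified, hypothetical stand-in for transformers'
# _LazyModule, shown in spirit only.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the heavy submodule is imported only now, on first attribute access
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)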
| 281
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase ( UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A__ : List[Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : List[Any] =0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Optional[Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : Tuple =tf.cast(math.pi , x.dtype )
A__ : Dict =tf.cast(0.04_47_15 , x.dtype )
A__ : Optional[int] =0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCamelCase , 3 )) ))
return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
return x * tf.tanh(tf.math.softplus(UpperCamelCase ) )
def lowercase ( UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : Union[str, Any] =tf.convert_to_tensor(UpperCamelCase )
A__ : List[Any] =tf.cast(0.04_47_15 , x.dtype )
A__ : List[Any] =tf.cast(0.79_78_84_56_08 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
A__ : List[str] =tf.convert_to_tensor(UpperCamelCase )
A__ : str =tf.cast(1.7_02 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 )
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any=-1 ):
"""simple docstring"""
A__ , A__ : Optional[Any] =tf.split(UpperCamelCase , 2 , axis=UpperCamelCase )
return a * tf.math.sigmoid(UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
        return tf.keras.activations.gelu(UpperCamelCase , approximate=True )
__A : Optional[Any] = tf.keras.activations.gelu
__A : Optional[Any] = approximate_gelu_wrap
else:
__A : Any = _gelu
__A : Union[str, Any] = _gelu_new
__A : List[str] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
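# --- Editor's note: a short sanity check of the tanh-based GELU approximation defined
# above (a sketch reusing the `math` and `tensorflow` imports at the top of this file).
x = tf.constant([-1.0, 0.0, 1.0])
exact = 0.5 * x * (1.0 + tf.math.erf(x / math.sqrt(2.0)))
approx = 0.5 * x * (1.0 + tf.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * tf.pow(x, 3))))
# The two agree to roughly 1e-3 on moderate inputs, which is why the tanh form can
# serve as a drop-in approximation of the exact erf-based GELU.
assert float(tf.reduce_max(tf.abs(exact - approx))) < 1e-3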
| 656
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = """▁"""
lowerCamelCase = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
lowerCamelCase = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
lowerCamelCase = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
lowerCamelCase = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
lowerCamelCase = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = ["input_ids"]
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = RESOURCE_FILES_NAMES
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase="utf8" , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase = None , **_UpperCAmelCase , ):
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , vocab_file=__lowerCAmelCase , encoding=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
snake_case_ = do_lower_case
snake_case_ = sentencepiece_model_ckpt
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
        # to mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
snake_case_ = self.load_vocab(filepath=__lowerCAmelCase )
else:
            snake_case_ = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
snake_case_ = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if text is None:
return None
snake_case_ = self.tokenize(__lowerCAmelCase )
snake_case_ , snake_case_ = '''''', []
for i, ch in enumerate(__lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
snake_case_ = self.SP_CHAR_MAPPING.get(__lowerCAmelCase )
else:
snake_case_ = unicodedata.normalize('''NFKC''' , __lowerCAmelCase )
if self.is_whitespace(__lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__lowerCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ = normalized_text, [], 0
if self.do_lower_case:
snake_case_ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
snake_case_ = token[1:]
snake_case_ = text[offset:].index(__lowerCAmelCase ) + offset
snake_case_ = start + len(__lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
snake_case_ = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
return len(self.vocab )
def UpperCamelCase__ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , _UpperCAmelCase ):
snake_case_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return "".join((self.SP_CHAR_MAPPING.get(__lowerCAmelCase , __lowerCAmelCase ) for c in text) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=64 , _UpperCAmelCase=0.1 ):
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
snake_case_ = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
snake_case_ = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
snake_case_ = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
snake_case_ = self.sp_model.EncodeAsPieces(__lowerCAmelCase )
else:
snake_case_ = self.sp_model.SampleEncodeAsPieces(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case_ = []
for pi, piece in enumerate(__lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
continue
else:
continue
snake_case_ = 0
for i, chunk in enumerate(__lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__lowerCAmelCase ) or self.is_punct(__lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__lowerCAmelCase )
snake_case_ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
snake_case_ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
snake_case_ = i
if len(__lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _UpperCAmelCase ):
        snake_case_ = ''''''.join(__lowerCAmelCase ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = self.convert_ids_to_tokens(__lowerCAmelCase )
        snake_case_ = ''''''.join(__lowerCAmelCase ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return self.vocab.get(__lowerCAmelCase , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return self.reverse_vocab.get(__lowerCAmelCase , self.unk_token )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__lowerCAmelCase ) + 1) + [1] * (len(__lowerCAmelCase ) + 3)
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__lowerCAmelCase ) == 1:
snake_case_ = unicodedata.category(__lowerCAmelCase )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = {}
with io.open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(__lowerCAmelCase ):
snake_case_ = line.rstrip('''\n''' )
snake_case_ = int(__lowerCAmelCase )
return token_to_idx
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
snake_case_ = 0
if os.path.isdir(__lowerCAmelCase ):
snake_case_ = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
snake_case_ = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
snake_case_ = token_index
writer.write(token + '''\n''' )
index += 1
snake_case_ = os.path.join(__lowerCAmelCase , '''sentencepiece.bpe.model''' )
with open(__lowerCAmelCase , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (vocab_file,)
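# --- Editor's note: the pair layout built above, spelled out on toy ids; the ids are
# hypothetical, only the [CLS]/[SEP] placement and the matching token_type_ids matter.
cls_id, sep_id = 0, 1
seq_a, seq_b = [10, 11], [20, 21, 22]
input_ids = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]  # [CLS] A [SEP] [SEP] B [SEP]
token_type_ids = [0] * (len(seq_a) + 1) + [1] * (len(seq_b) + 3)
assert len(input_ids) == len(token_type_ids) == 9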
| 707
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 2
def UpperCamelCase__ ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.type_sequence_label_size
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ = False
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
snake_case_ = problem_type['''title''']
snake_case_ = problem_type['''num_labels''']
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
snake_case_ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
snake_case_ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which would be a symptom that something is wrong with the regression
                    # problem. See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
snake_case_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ()-> Optional[int]:
"""simple docstring"""
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
snake_case_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase )
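# --- Editor's note: a hedged sketch of the half-precision inference recipe the test
# above exercises; it assumes a CUDA device and the COCO test fixture are available.
# "facebook/deit-base-distilled-patch16-224" is the real checkpoint used above.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

if torch.cuda.is_available():
    name = "facebook/deit-base-distilled-patch16-224"
    model = AutoModel.from_pretrained(name, torch_dtype=torch.float16).to("cuda")
    processor = AutoImageProcessor.from_pretrained(name)
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values.to("cuda", torch.float16))
    assert outputs.last_hidden_state.dtype == torch.float16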
| 531
| 0
|
from __future__ import annotations
from typing import Any
class _lowercase :
def __init__( self , a , a , a = 0 ):
snake_case__ , snake_case__ : str =row, column
snake_case__ : Any =[[default_value for c in range(a )] for r in range(a )]
def __str__( self ):
snake_case__ : List[Any] =F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
snake_case__ : List[str] =0
for row_vector in self.array:
for obj in row_vector:
snake_case__ : Union[str, Any] =max(a , len(str(a ) ) )
snake_case__ : Optional[Any] =F"%{max_element_length}s"
# Make string and return
def single_line(a ) -> str:
nonlocal string_format_identifier
snake_case__ : List[str] ="""["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(a ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def lowercase__ ( self , a ):
if not (isinstance(a , (list, tuple) ) and len(a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , a ):
assert self.validate_indicies(a )
return self.array[loc[0]][loc[1]]
def __setitem__( self , a , a ):
assert self.validate_indicies(a )
snake_case__ : Optional[int] =value
def __add__( self , a ):
assert isinstance(a , a )
assert self.row == another.row and self.column == another.column
# Add
snake_case__ : List[str] =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case__ : Tuple =self[r, c] + another[r, c]
return result
def __neg__( self ):
snake_case__ : int =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case__ : str =-self[r, c]
return result
def __sub__( self , a ):
return self + (-another)
def __mul__( self , a ):
if isinstance(a , (int, float) ): # Scalar multiplication
snake_case__ : Union[str, Any] =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
snake_case__ : Union[str, Any] =self[r, c] * another
return result
elif isinstance(a , a ): # Matrix multiplication
assert self.column == another.row
snake_case__ : Any =Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
snake_case__ : List[Any] =F"Unsupported type given for another ({type(a )})"
raise TypeError(a )
def lowercase__ ( self ):
snake_case__ : int =Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
snake_case__ : List[str] =self[r, c]
return result
def lowercase__ ( self , a , a ):
assert isinstance(a , a ) and isinstance(a , a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
snake_case__ : Dict =v.transpose()
snake_case__ : Optional[Any] =(v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
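# --- Editor's note: the update above is the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# applied with `self` playing the role of A^(-1); when the denominator is zero,
# A + u v^T is singular and the method returns None. A 1x1 sanity check:
# A = [[2]], u = v = [[1]] gives A + u v^T = [[3]], and
# 0.5 - (0.5 * 1 * 0.5) / (1 + 1 * 0.5 * 1) = 1/3, the inverse of 3.
assert abs((0.5 - 0.25 / 1.5) - 1 / 3) < 1e-12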
# Testing
if __name__ == "__main__":
def A__ ( ):
'''simple docstring'''
snake_case__ : Union[str, Any] =Matrix(3 , 3 , 0 )
for i in range(3 ):
snake_case__ : Dict =1
print(f"a^(-1) is {ainv}" )
# u, v
snake_case__ : Tuple =Matrix(3 , 1 , 0 )
snake_case__ , snake_case__ , snake_case__ : int =1, 2, -3
snake_case__ : Tuple =Matrix(3 , 1 , 0 )
snake_case__ , snake_case__ , snake_case__ : int =4, -2, 5
print(f"u is {u}" )
print(f"v is {v}" )
print(f"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(_a , _a )}" )
def A__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 385
|
import string
from math import logaa
def A__ ( _a : str , _a : str ):
'''simple docstring'''
snake_case__ : List[Any] =document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
snake_case__ : Any =document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A__ ( _a : str , _a : str ):
'''simple docstring'''
snake_case__ : str =corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
snake_case__ : str =corpus_without_punctuation.split("""\n""" )
snake_case__ : Tuple =term.lower()
return (len([doc for doc in docs if term in doc] ), len(_a ))
def A__ ( _a : int , _a : int , _a : Tuple=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def A__ ( _a : int , _a : int ):
'''simple docstring'''
return round(tf * idf , 3 )
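# --- Editor's note: a worked example tying the functions above together; the corpus
# statistics are made up for illustration.
from math import log10

# "python" appears 3 times in the document (the raw term frequency used above), and in
# 10 of 1000 corpus documents, so idf = log10(1000 / 10) = 2.0 and tf-idf = 3 * 2.0.
example_idf = round(log10(1000 / 10), 3)
example_tf_idf = round(3 * example_idf, 3)
assert (example_idf, example_tf_idf) == (2.0, 6.0)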
| 385
| 1
|
"""simple docstring"""
import string
def decrypt( message ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
        translated = ''''''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"""Decryption using Key #{key}: {translated}""" )
def main( ) -> None:
"""simple docstring"""
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
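# --- Editor's note: a compact restatement of the brute-force shift above for a single
# key, on toy input; shifting "B" back by key 1 recovers "A". Reuses the `string`
# import at the top of this file.
ciphertext, key = "B", 1
index = (string.ascii_uppercase.find(ciphertext) - key) % len(string.ascii_uppercase)
assert string.ascii_uppercase[index] == "A"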
| 711
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase :
def __init__( self ):
_UpperCAmelCase = {}
def __A ( self , a__ , a__ , a__=1 ):
if self.graph.get(a__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(a__ ):
_UpperCAmelCase = []
def __A ( self ):
return list(self.graph )
def __A ( self , a__ , a__ ):
if self.graph.get(a__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(a__ )
def __A ( self , a__=-2 , a__=-1 ):
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(a__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return visited
def __A ( self , a__=-1 ):
if c == -1:
_UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(a__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(a__ , a__ , 1 )
def __A ( self , a__=-2 ):
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(a__ )
visited.append(a__ )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __A ( self , a__ ):
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __A ( self , a__ ):
return len(self.graph[u] )
def __A ( self , a__=-2 ):
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return sorted_nodes
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return list(a__ )
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            s = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
                else:
                    s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return list(a__ )
def __A ( self ):
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(a__ )
visited.append(a__ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(a__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(a__ ) != 0:
_UpperCAmelCase = stack[len(a__ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(a__ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(a__ ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
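# A minimal standalone sketch of the stack-based traversal pattern both classes
# above rely on; `dfs_sketch` is an illustrative helper, not part of the classes.
# Edges are stored, as in the classes, as [weight, vertex] pairs keyed by source.
def dfs_sketch(graph: dict, start) -> list:
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push unvisited neighbours of the current vertex
            stack.extend(v for _, v in graph.get(node, []) if v not in visited)
    return visited


assert dfs_sketch({1: [[1, 2], [1, 3]], 2: [[1, 4]], 3: [], 4: []}, 1) == [1, 3, 2, 4]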
| 494
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: int=2 ,__lowerCAmelCase: Dict=24 ,__lowerCAmelCase: int=16 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Optional[int]=5 ,__lowerCAmelCase: Optional[Any]=4 ,__lowerCAmelCase: Optional[Any]=37 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: Dict=2 ,):
'''simple docstring'''
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Any = max_length
_lowerCamelCase : Any = num_mel_bins
_lowerCamelCase : int = is_training
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : int = scope
_lowerCamelCase : Any = frequency_stride
_lowerCamelCase : Optional[int] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase : str = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase : List[Any] = frequency_out_dimension * time_out_dimension
_lowerCamelCase : Optional[Any] = num_patches + 2
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : List[Any] = self.get_config()
return config, input_values, labels
def _lowercase ( self: str ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,)
def _lowercase ( self: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ASTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = ASTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Dict ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 46
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase_ = """\
Text data.
Second line of data."""
UpperCamelCase_ = """file"""
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
_lowerCAmelCase : Tuple = bytes(_lowerCamelCase , """utf-8""" )
with zstd.open(_lowerCamelCase , """wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def _UpperCAmelCase ( _lowerCamelCase : Dict ) -> Dict:
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , """w""" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int ) -> Optional[Any]:
_lowerCAmelCase : Tuple = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
_lowerCAmelCase : Dict = input_paths[compression_format]
_lowerCAmelCase : List[Any] = tmp_path / """cache"""
_lowerCAmelCase : Optional[int] = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : Dict = f.read()
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : Optional[int] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict ) -> int:
_lowerCAmelCase : List[str] = """custom_cache"""
_lowerCAmelCase : Tuple = """custom_extracted_dir"""
_lowerCAmelCase : Dict = tmp_path / """custom_extracted_path"""
if default_extracted:
_lowerCAmelCase : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _lowerCamelCase )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_lowerCamelCase ) )
_lowerCAmelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCAmelCase : Optional[Any] = xz_file
_lowerCAmelCase : Any = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCAmelCase : List[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Dict:
# absolute path
_lowerCAmelCase : List[Any] = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCAmelCase : Any = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def _UpperCAmelCase ( _lowerCamelCase : Dict ) -> Optional[int]:
# absolute path
_lowerCAmelCase : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCAmelCase : int = """./__missing_file__.txt"""
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
_lowerCAmelCase : str = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowerCamelCase )
def _UpperCAmelCase ( ) -> str:
with pytest.raises(_lowerCamelCase ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : int = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_lowerCamelCase ):
http_get("""https://huggingface.co""" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> Optional[int]:
_lowerCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_lowerCamelCase ):
ftp_get("""ftp://huggingface.co""" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_lowerCamelCase ):
fsspec_get("""s3://huggingface.co""" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("""s3://huggingface.co""" )
| 384
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = 'dpt'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                pass
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
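# Hedged usage sketch: with the hybrid flag set, the branch above auto-builds a BiT
# backbone config. `DPTConfig` refers to the class defined above; guarded so the
# module stays importable.
if __name__ == "__main__":
    config = DPTConfig(is_hybrid=True)
    print(type(config.backbone_config).__name__)  # -> BitConfig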
| 145
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_UpperCAmelCase : Tuple = False
class __magic_name__ ( unittest.TestCase ):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]
        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
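# Why the final allclose checks can pass: DDPMScheduler and DDIMScheduler derive
# `add_noise` from the same alpha_bar schedule, so identical seeds, batches and a
# freshly re-seeded model make both training runs see identical tensors.
# Hedged standalone sanity check (reuses the imports at the top of this file):
def _schedulers_agree_on_add_noise() -> bool:
    ddpm = DDPMScheduler(num_train_timesteps=1000)
    ddim = DDIMScheduler(num_train_timesteps=1000)
    sample, noise = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8)
    t = torch.tensor([10])
    return bool(torch.allclose(ddpm.add_noise(sample, noise, t), ddim.add_noise(sample, noise, t), atol=1e-5))


if __name__ == "__main__":
    print("schedulers agree on add_noise:", _schedulers_agree_on_add_noise())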
| 145
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'mctct'

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f'but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.')
| 692
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __a : int , __a : int , __a : int , __a : str=0.0 , __a : Optional[int] = None , __a : str = "geglu" , __a : Optional[int] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : str = "layer_norm" , __a : bool = False , ):
super().__init__()
_a = only_cross_attention
_a = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_a = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_a = AdaLayerNorm(__a , __a )
elif self.use_ada_layer_norm_zero:
_a = AdaLayerNormZero(__a , __a )
else:
_a = nn.LayerNorm(__a , elementwise_affine=__a )
_a = Attention(
query_dim=__a , heads=__a , dim_head=__a , dropout=__a , bias=__a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__a , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_a = (
AdaLayerNorm(__a , __a )
if self.use_ada_layer_norm
else nn.LayerNorm(__a , elementwise_affine=__a )
)
_a = Attention(
query_dim=__a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__a , dim_head=__a , dropout=__a , bias=__a , upcast_attention=__a , ) # is self-attn if encoder_hidden_states is none
else:
_a = None
_a = None
# 3. Feed-forward
_a = nn.LayerNorm(__a , elementwise_affine=__a )
_a = FeedForward(__a , dropout=__a , activation_fn=__a , final_dropout=__a )
# let chunk size default to None
_a = None
_a = 0
def UpperCamelCase__ ( self : int , __a : Optional[int] , __a : int ):
# Sets chunk feed-forward
_a = chunk_size
_a = dim
def UpperCamelCase__ ( self : List[str] , __a : torch.FloatTensor , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Dict[str, Any] = None , __a : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
_a = self.norma(__a , __a )
elif self.use_ada_layer_norm_zero:
_a , _a , _a , _a , _a = self.norma(
__a , __a , __a , hidden_dtype=hidden_states.dtype )
else:
_a = self.norma(__a )
_a = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_a = self.attna(
__a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__a , **__a , )
if self.use_ada_layer_norm_zero:
_a = gate_msa.unsqueeze(1 ) * attn_output
_a = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_a = (
self.norma(__a , __a ) if self.use_ada_layer_norm else self.norma(__a )
)
_a = self.attna(
__a , encoder_hidden_states=__a , attention_mask=__a , **__a , )
_a = attn_output + hidden_states
# 3. Feed-forward
_a = self.norma(__a )
if self.use_ada_layer_norm_zero:
_a = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
_a = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_a = torch.cat(
[self.ff(__a ) for hid_slice in norm_hidden_states.chunk(__a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_a = self.ff(__a )
if self.use_ada_layer_norm_zero:
_a = gate_mlp.unsqueeze(1 ) * ff_output
_a = ff_output + hidden_states
return hidden_states
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __a : int , __a : Optional[int] = None , __a : int = 4 , __a : float = 0.0 , __a : str = "geglu" , __a : bool = False , ):
super().__init__()
_a = int(dim * mult )
_a = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_a = GELU(__a , __a )
if activation_fn == "gelu-approximate":
_a = GELU(__a , __a , approximate="tanh" )
elif activation_fn == "geglu":
_a = GEGLU(__a , __a )
elif activation_fn == "geglu-approximate":
_a = ApproximateGELU(__a , __a )
_a = nn.ModuleList([] )
# project in
self.net.append(__a )
# project dropout
self.net.append(nn.Dropout(__a ) )
# project out
self.net.append(nn.Linear(__a , __a ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__a ) )
def UpperCamelCase__ ( self : List[Any] , __a : Tuple ):
for module in self.net:
_a = module(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : int , __a : int , __a : int , __a : str = "none" ):
super().__init__()
_a = nn.Linear(__a , __a )
_a = approximate
def UpperCamelCase__ ( self : Union[str, Any] , __a : List[Any] ):
if gate.device.type != "mps":
return F.gelu(__a , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase__ ( self : str , __a : Optional[int] ):
_a = self.proj(__a )
_a = self.gelu(__a )
return hidden_states
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : str , __a : int , __a : int ):
super().__init__()
_a = nn.Linear(__a , dim_out * 2 )
def UpperCamelCase__ ( self : List[Any] , __a : Optional[int] ):
if gate.device.type != "mps":
return F.gelu(__a )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCamelCase__ ( self : List[str] , __a : Any ):
_a , _a = self.proj(__a ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__a )
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __a : int , __a : int ):
super().__init__()
_a = nn.Linear(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Dict ):
_a = self.proj(__a )
return x * torch.sigmoid(1.702 * x )
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : int , __a : str , __a : str ):
super().__init__()
_a = nn.Embedding(__a , __a )
_a = nn.SiLU()
_a = nn.Linear(__a , embedding_dim * 2 )
_a = nn.LayerNorm(__a , elementwise_affine=__a )
def UpperCamelCase__ ( self : Tuple , __a : Any , __a : Optional[Any] ):
_a = self.linear(self.silu(self.emb(__a ) ) )
_a , _a = torch.chunk(__a , 2 )
_a = self.norm(__a ) * (1 + scale) + shift
return x
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __a : List[Any] , __a : Any ):
super().__init__()
_a = CombinedTimestepLabelEmbeddings(__a , __a )
_a = nn.SiLU()
_a = nn.Linear(__a , 6 * embedding_dim , bias=__a )
_a = nn.LayerNorm(__a , elementwise_affine=__a , eps=1e-6 )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict , __a : List[Any] , __a : Union[str, Any] , __a : List[Any]=None ):
_a = self.linear(self.silu(self.emb(__a , __a , hidden_dtype=__a ) ) )
_a , _a , _a , _a , _a , _a = emb.chunk(6 , dim=1 )
_a = self.norm(__a ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __SCREAMING_SNAKE_CASE (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , __a : int , __a : int , __a : int , __a : Optional[str] = None , __a : float = 1e-5 ):
super().__init__()
_a = num_groups
_a = eps
if act_fn is None:
_a = None
else:
_a = get_activation(__a )
_a = nn.Linear(__a , out_dim * 2 )
def UpperCamelCase__ ( self : List[Any] , __a : Optional[Any] , __a : List[Any] ):
if self.act:
_a = self.act(__a )
_a = self.linear(__a )
_a = emb[:, :, None, None]
_a , _a = emb.chunk(2 , dim=1 )
_a = F.group_norm(__a , self.num_groups , eps=self.eps )
_a = x * (1 + scale) + shift
return x
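# A minimal standalone sketch of the FiLM-style modulation the adaptive
# normalization classes above all apply, i.e. x * (1 + scale) + shift.
# (`_film_modulate` and its names are illustrative, not from the source.)
def _film_modulate(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
    return x * (1 + scale) + shift


_x = torch.randn(2, 4)
assert _film_modulate(_x, torch.zeros(2, 4), torch.zeros(2, 4)).equal(_x)  # identity at zero scale/shift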
| 692
| 1
|
def solution(n: int = 100) -> int:
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 49
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
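# Hedged usage sketch for abbr() above: "daBcd" can be abbreviated to "ABC"
# (capitalize 'a' and 'c', keep 'B', drop the lowercase 'd's), while "dBcd" cannot.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False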
| 49
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : int = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : List[str] = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
UpperCAmelCase : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
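# Hedged usage sketch (downloads the checkpoint named in PRETRAINED_VOCAB_FILES_MAP
# above, so it needs network access; guarded to keep the module importable):
if __name__ == "__main__":
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    print(tokenizer("A picture of a cat.")["input_ids"])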
| 239
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 4
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 453
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
_UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
_UpperCAmelCase : List[Any] = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
_UpperCAmelCase : Tuple = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_UpperCAmelCase : Any = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
| 453
| 1
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit of ``num``."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(num))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
| 38
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokenized_s, tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokenized_s, tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669
| 0
|
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword, preserving spaces."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword substitution-cipher mapping from plain letters to cipher letters."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher ``message`` with the given cipher map, leaving unmapped characters as-is."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher ``message`` by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Handle I/O for enciphering or deciphering a message."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 403
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=32, sample_size=32, scaling_factor=0.18215, ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
@apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator=None, ):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
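# Minimal usage sketch (the shapes and random input below are illustrative
# assumptions, not part of the original module):
#
# vae = AutoencoderKL(sample_size=512)
# vae.enable_tiling()
# image = torch.randn(1, 3, 512, 512)
# posterior = vae.encode(image).latent_dist
# reconstruction = vae.decode(posterior.sample()).sample  # (1, 3, 512, 512) with the defaults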
| 403
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def final():
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number using binary search."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
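# Example: find_negative_index([4, 3, 2, -1]) returns 3, the index of the first
# negative value; find_negative_index([1, 2, 3]) returns 3, i.e. the length.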
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negative numbers, narrowing the search bound row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 31
| 0
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
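# Minimal instantiation sketch (the specific values below are illustrative
# assumptions, not taken from the original file):
#
# config = AutoformerConfig(prediction_length=24, num_time_features=2)
# config.feature_size  # = input_size * len(lags_sequence) + config._number_of_features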
| 529
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''Swap array[index1] and array[index2] if they are out of order for the given direction.'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''Recursively merge a bitonic sequence into sorted order.'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''Bitonic sort; the portion being sorted must have a length that is a power of two.'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
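# Example (bitonic sort only works on lists whose length is a power of two):
# data = [12, 42, -21, 1]
# bitonic_sort(data, 0, len(data), 1)  # data becomes [-21, 1, 12, 42]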
if __name__ == "__main__":
a = input('Enter numbers separated by a comma:\n').strip()
a = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 529
| 1
|
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using sqrt (subject to float precision)."""
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search, avoiding float precision issues."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
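# Example: perfect_square_binary_search(9) -> True, perfect_square_binary_search(10) -> False.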
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 632
| 0
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
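# Usage sketch (assumes the HashTable base class in this package exposes an
# insert_data method; shown only for illustration):
#
# table = HashTableWithLinkedList(3, charge_factor=2)
# for value in (10, 20, 30):
#     table.insert_data(value)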
| 65
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4_096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 65
| 1
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
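# Example: the 4-cycle below (0-1-2-3-0) is bipartite, so check_bipartite_dfs
# returns True; adding an edge such as (0, 2) would make it return False.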
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 686
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
        output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
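# Hedged worked example for the default decoder mask built above (toy ids
# assumed for illustration, with pad_token_id = 1): decoder_input_ids of
# [[2, 8, 1, 1]] yield decoder_attention_mask [[1, 1, 0, 0]]: the first
# position is always attended to, later pad positions are masked out.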
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 )
| 686
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Union[str, Any] = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__UpperCamelCase : Optional[Any] = """▁"""
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = VOCAB_FILES_NAMES
__snake_case :List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Optional[Any] = ['input_ids', 'attention_mask']
__snake_case :Tuple = BarthezTokenizer
def __init__( self : int , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : int="<s>" , _lowerCAmelCase : Union[str, Any]="<unk>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : Any="<mask>" , **_lowerCAmelCase : Optional[Any] , ) -> int:
"""simple docstring"""
__lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase = vocab_file
__lowercase = False if not self.vocab_file else True
def _a ( self : str , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : Optional[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
| 53
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths ``nums`` can form a polygon, i.e.
    the longest side is strictly shorter than the sum of all the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
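    # Hedged usage sketch for the check above: the longest side must be
    # strictly shorter than the sum of all the others.
    assert check_polygon([6, 10, 5]) is True  # 10 < 6 + 5
    assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2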
| 53
| 1
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False, False, False
@dataclass
class __A :
UpperCAmelCase__ = None
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = None
# Automatically constructed
UpperCAmelCase__ = "dict"
UpperCAmelCase__ = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCAmelCase__ = field(default="Audio" ,init=lowerCAmelCase__ ,repr=lowerCAmelCase__ )
def __call__( self : Union[str, Any] ) -> Optional[int]:
return self.pa_type
def lowerCamelCase__ ( self : int , __snake_case : List[str] ) -> int:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
sf.write(__UpperCAmelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
sf.write(__UpperCAmelCase , __UpperCAmelCase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
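    # Worked example for the PCM branch above: a 16-bit sample of 16384
    # maps to 16384 / 32767 ~= 0.5 in float32, i.e. half of full scale.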
def lowerCamelCase__ ( self : str , __snake_case : Any , __snake_case : Any = None ) -> str:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
__magic_name__: Dict = token_per_repo_id or {}
__magic_name__: Optional[Any] = path.split("""::""" )[-1]
try:
__magic_name__: int = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__magic_name__: Optional[int] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__magic_name__: Union[str, Any] = None
with xopen(__UpperCAmelCase , """rb""" , use_auth_token=__UpperCAmelCase ) as f:
                array, sampling_rate = sf.read(f)
else:
            array, sampling_rate = sf.read(file)
__magic_name__: List[Any] = array.T
if self.mono:
__magic_name__: Union[str, Any] = librosa.to_mono(__UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__magic_name__: Optional[Any] = librosa.resample(__UpperCAmelCase , orig_sr=__UpperCAmelCase , target_sr=self.sampling_rate )
__magic_name__: Union[str, Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase__ ( self : str ) -> Tuple:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[str] ) -> List[Any]:
if pa.types.is_string(storage.type ):
__magic_name__: Union[str, Any] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
__magic_name__: Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__magic_name__: Any = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__magic_name__: Optional[int] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
__magic_name__: List[str] = pa.array([Audio().encode_example(__UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__magic_name__: int = storage.field("""bytes""" )
else:
__magic_name__: List[str] = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__magic_name__: Tuple = storage.field("""path""" )
else:
__magic_name__: str = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__magic_name__: Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[str] ) -> List[Any]:
@no_op_if_value_is_null
def path_to_bytes(__snake_case : List[Any] ):
with xopen(__UpperCAmelCase , """rb""" ) as f:
__magic_name__: int = f.read()
return bytes_
__magic_name__: List[str] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__magic_name__: str = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__magic_name__: Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
| 96
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """canine"""
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=16384 , __UpperCAmelCase=16 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase=0Xe_000 , __UpperCAmelCase=0Xe_001 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=16384 , __UpperCAmelCase=128 , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
# Character config:
__lowerCamelCase = downsampling_rate
__lowerCamelCase = upsampling_kernel_size
__lowerCamelCase = num_hash_functions
__lowerCamelCase = num_hash_buckets
__lowerCamelCase = local_transformer_stride
| 175
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709
|
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count representations of ``needed_sum`` as a sum of unique natural
    numbers each raised to ``power``, via recursive backtracking."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
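    # Hedged usage sketch: 13 = 2**2 + 3**2 is the only way to write 13 as
    # a sum of unique natural numbers squared, so:
    assert solve(13, 2) == 1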
| 143
| 0
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in ``s`` where ``pattern`` starts."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
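    # The naive scan is O(len(s) * len(pattern)) in the worst case, and
    # overlapping matches are still reported one start index at a time:
    print(naive_pattern_search("AAAA", "AA"))  # [0, 1, 2]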
| 694
|
def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at ``position`` of ``number``."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at ``position`` of ``number``."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
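    # Hedged usage sketch for the helpers above (0b101 == 5):
    assert set_bit(0b101, 1) == 0b111  # 5 | 0b010 -> 7
    assert clear_bit(0b111, 1) == 0b101  # 7 & ~0b010 -> 5
    assert flip_bit(0b101, 2) == 0b001  # 5 ^ 0b100 -> 1
    assert is_bit_set(0b101, 2) is True
    assert get_bit(0b101, 1) == 0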
| 694
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 235
|
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can be filled with blocks
    of length >= 3 separated by at least one single square
    (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
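    # Hedged check against the Project Euler 114 statement, which lists
    # exactly seventeen tilings for a row of length seven:
    assert solution(7) == 17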
| 235
| 1
|
"""simple docstring"""
def _snake_case ( _snake_case : Dict ) -> Tuple:
'''simple docstring'''
_A = len(_snake_case )
for i in range(length - 1 ):
_A = i
for k in range(i + 1 , _snake_case ):
if collection[k] < collection[least]:
_A = k
if least != i:
_A , _A = (collection[i], collection[least])
return collection
if __name__ == "__main__":
a = input('''Enter numbers separated by a comma:\n''').strip()
a = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
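    # Hedged sanity check: selection sort is O(n^2) and sorts in place.
    assert selection_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]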
| 7
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar("KT")
SCREAMING_SNAKE_CASE_ = TypeVar("VT")
class lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
    def __init__(self, key: KT | str = "root", value: VT | None = None) -> None:
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)
class lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
    def __init__(self, p: float = 0.5, max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
def __magic_name__ ( self , _A ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
__a : str = []
__a : Optional[Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__a : Optional[int] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_A )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __magic_name__ ( self , _A ) -> Tuple:
__a , __a : Optional[Any] = self._locate_node(_A )
if node is not None:
for i, update_node in enumerate(_A ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__a : List[Any] = node.forward[i]
else:
__a : Tuple = update_node.forward[:i]
def __magic_name__ ( self , _A , _A ) -> Optional[int]:
__a , __a : Optional[int] = self._locate_node(_A )
if node is not None:
__a : Union[str, Any] = value
else:
__a : Dict = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _A ):
update_vector.append(self.head )
__a : str = level
__a : Optional[int] = Node(_A , _A )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_A )
else:
__a : Tuple = new_node
def __magic_name__ ( self , _A ) -> VT | None:
__a , __a : str = self._locate_node(_A )
if node is not None:
return node.value
return None
def lowerCAmelCase__ ( ):
__a : str = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
__a : Dict = skip_list.head
__a : Optional[Any] = {}
while node.level != 0:
__a : Union[str, Any] = node.forward[0]
__a : Any = node.value
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
__a : Optional[int] = skip_list.head
__a : Optional[int] = {}
while node.level != 0:
__a : List[Any] = node.forward[0]
__a : Dict = node.value
if len(SCREAMING_SNAKE_CASE__ ) != 4:
print()
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowerCAmelCase__ ( ):
__a : Optional[int] = SkipList()
assert skip_list.find('Some key' ) is None
def lowerCAmelCase__ ( ):
__a : str = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def lowerCAmelCase__ ( ):
__a : List[str] = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def lowerCAmelCase__ ( ):
__a : int = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase__ ( ):
__a : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(SCREAMING_SNAKE_CASE__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(SCREAMING_SNAKE_CASE__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowerCAmelCase__ ( ):
def is_sorted(SCREAMING_SNAKE_CASE__ ):
return all(next_item >= item for item, next_item in zip(SCREAMING_SNAKE_CASE__ , lst[1:] ) )
__a : Any = SkipList()
for i in range(10 ):
skip_list.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
def lowerCAmelCase__ ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
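    # Hedged mini-demo (the structure is probabilistic, so only ordering
    # and membership are asserted, never the exact node layout):
    demo = SkipList()
    for demo_key, demo_value in [(3, "c"), (1, "a"), (2, "b")]:
        demo.insert(demo_key, demo_value)
    assert list(demo) == [1, 2, 3]  # iteration yields keys in sorted order
    assert demo.find(2) == "b"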
| 597
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase :
"""simple docstring"""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Dict ) -> int:
return len(self.__components )
def __str__( self : Union[str, Any] ) -> str:
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : int , UpperCamelCase__ : Vector ) -> Vector:
_UpperCamelCase =len(self )
if size == len(UpperCamelCase__ ):
_UpperCamelCase =[self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('''must have the same size''' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ) -> Vector:
_UpperCamelCase =len(self )
if size == len(UpperCamelCase__ ):
_UpperCamelCase =[self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self : Optional[Any] , UpperCamelCase__ : float ) -> Vector:
...
@overload
def __mul__( self : int , UpperCamelCase__ : Vector ) -> float:
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ) -> float | Vector:
if isinstance(UpperCamelCase__ , (float, int) ):
_UpperCamelCase =[c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
_UpperCamelCase =len(self )
_UpperCamelCase =[self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('''invalid operand!''' )
def UpperCamelCase__ ( self : List[Any] ) -> Vector:
return Vector(self.__components )
def UpperCamelCase__ ( self : int , UpperCamelCase__ : int ) -> float:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : float ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
_UpperCamelCase =value
def UpperCamelCase__ ( self : List[str] ) -> float:
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
_UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ) -> float:
_UpperCamelCase =self * other
_UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    """Return a zero Vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index ``pos``."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a Vector of ``n`` random integers in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> None:
_UpperCamelCase =matrix
_UpperCamelCase =w
_UpperCamelCase =h
def __str__( self : Optional[Any] ) -> str:
_UpperCamelCase =''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase =[]
for i in range(self.__height ):
_UpperCamelCase =[
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self : List[Any] , UpperCamelCase__ : Matrix ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase =[]
for i in range(self.__height ):
_UpperCamelCase =[
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : List[Any] , UpperCamelCase__ : float ) -> Matrix:
...
@overload
def __mul__( self : List[Any] , UpperCamelCase__ : Vector ) -> Vector:
...
def __mul__( self : int , UpperCamelCase__ : float | Vector ) -> Vector | Matrix:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
_UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
_UpperCamelCase =[
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
_UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase__ ( self : Optional[Any] ) -> int:
return self.__height
def UpperCamelCase__ ( self : int ) -> int:
return self.__width
def UpperCamelCase__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
_UpperCamelCase =value
else:
raise Exception('''change_component: indices out of bounds''' )
def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
_UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
_UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase__ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('''Indices out of bounds''' )
def UpperCamelCase__ ( self : int ) -> float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def square_zero_matrix(n: int) -> Matrix:
    """Return an ``n`` x ``n`` matrix of zeros."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a ``height`` x ``width`` matrix of random integers in [a, b]."""
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
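# Hedged usage sketch for the helpers above (illustrative values only,
# assuming the Vector/Matrix operator overloads behave as their bodies
# indicate):
#
#   Vector([1, 2, 3]) + Vector([1, 1, 1])   -> (2,3,4)
#   Vector([1, 2, 3]) * Vector([2, -1, 4])  -> dot product 2 - 2 + 12 = 12
#   axpy(2, Vector([1, 0]), Vector([0, 1])) -> 2*x + y = (2,1)
#   random_matrix(2, 3, 0, 9)               -> a 3x2 matrix of random digits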
| 718
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase :
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : List[Any] ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCamelCase =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
_UpperCamelCase =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
_UpperCamelCase =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_UpperCamelCase =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCamelCase__ ( self : Any ) -> Any:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''prompt''']
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
if "image" in inputs:
_UpperCamelCase =inputs['''image''']
else:
_UpperCamelCase =None
if "mask_image" in inputs:
_UpperCamelCase =inputs['''mask_image''']
else:
_UpperCamelCase =None
if "original_image" in inputs:
_UpperCamelCase =inputs['''original_image''']
else:
_UpperCamelCase =None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =inputs['''generator''']
_UpperCamelCase =inputs['''num_inference_steps''']
_UpperCamelCase =inputs['''output_type''']
# inputs with prompt converted to embeddings
_UpperCamelCase ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
_UpperCamelCase =image
if mask_image is not None:
_UpperCamelCase =mask_image
if original_image is not None:
_UpperCamelCase =original_image
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
def UpperCamelCase__ ( self : int ) -> str:
_UpperCamelCase =self.get_dummy_components()
_UpperCamelCase =self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
_UpperCamelCase =self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_UpperCamelCase =self.get_dummy_inputs(UpperCamelCase__ )
_UpperCamelCase =pipe_loaded(**UpperCamelCase__ )[0]
_UpperCamelCase =np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1E-4 )
| 271
| 0
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCamelCase( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Any = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
__a : int = AutoTokenizer.from_pretrained('google/mt5-small' )
__a : Any = tokenizer('Hello there' , return_tensors='np' ).input_ids
__a : Any = tokenizer('Hi I am' , return_tensors='np' ).input_ids
__a : List[str] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )
__a : Tuple = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
__a : Union[str, Any] = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()
__a : Tuple = -(labels.shape[-1] * loss.item())
__a : Optional[Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 47
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
    """
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
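
# Example invocation (hypothetical script name and paths, shown for orientation only):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch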
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """
    Given an even-length matrix, returns the top_left, top_right, bot_left, bot_right quadrants.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
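
# Note on the approach (illustrative commentary): each recursion level trades one
# n x n product for seven (n/2) x (n/2) products (t1..t7 above), giving the
# O(n^log2(7)) ~= O(n^2.81) bound instead of the naive O(n^3). Since strassen()
# returns early for two square inputs (behavior kept from the source), a quick
# hand-checked sanity case uses non-square shapes:
#   strassen([[1, 2], [3, 4], [5, 6]], [[1, 0], [0, 1]]) == [[1, 2], [3, 4], [5, 6]]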
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
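
# Behavioral note (illustrative): instantiation keeps working but warns, e.g.
#   extractor = DeiTFeatureExtractor()  # FutureWarning, then acts as a DeiTImageProcessor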
def z_function(input_str: str) -> list[int]:
    """
    For every index, computes the length of the longest substring starting
    there that is also a prefix of the whole string (the Z-array).
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    # check whether the match starting at index i can be extended by one character
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
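
# Illustrative check (an assumed, standalone snippet; values verified by hand):
if __name__ == "__main__":
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    assert find_pattern("abr", "abracadabra") == 2  # "abr" starts at indices 0 and 7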
import os


def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum in the matrix read from `filename`, moving
    only right, up and down between any cell of the leftmost column and any
    cell of the rightmost column.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
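
# Worked example (illustrative; a tiny hand-checked grid, not the puzzle input):
# for [[1, 9], [5, 1]] the three passes settle the right column at [10, 6], so
# the minimal left-to-right path costs 6 (enter at 5, move right onto 1).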
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
UpperCAmelCase: Dict = parse(importlib.metadata.version("""torch"""))
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
_lowercase : Any = STR_OPERATION_TO_FUNC[operation]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Tuple = parse(importlib.metadata.version(__UpperCAmelCase ) )
return operation(__UpperCAmelCase , parse(__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
return compare_versions(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
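
# Usage sketch (illustrative; relies on the STR_OPERATION_TO_FUNC operator table):
#   compare_versions("numpy", ">=", "1.20.0")  # checks the installed numpy
#   is_torch_version("<", "2.0.0")             # checks the torch version parsed at import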
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=True , __UpperCAmelCase=2 ):
from .. import __version__
_lowercase : Tuple = take_from
_lowercase : str = ()
if not isinstance(args[0] , __UpperCAmelCase ):
_lowercase : List[str] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__UpperCAmelCase ).base_version ) >= version.parse(__UpperCAmelCase ):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""" )
_lowercase : Tuple = None
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__UpperCAmelCase ),)
_lowercase : List[Any] = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__UpperCAmelCase , __UpperCAmelCase ):
values += (getattr(__UpperCAmelCase , __UpperCAmelCase ),)
_lowercase : int = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_lowercase : Optional[int] = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_lowercase : Any = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , __UpperCAmelCase , stacklevel=__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) > 0:
_lowercase : Optional[int] = inspect.getouterframes(inspect.currentframe() )[1]
_lowercase : List[Any] = call_frame.filename
_lowercase : Any = call_frame.lineno
_lowercase : List[Any] = call_frame.function
_lowercase , _lowercase : Any = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__UpperCAmelCase ) == 0:
return
elif len(__UpperCAmelCase ) == 1:
return values[0]
return values
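
# Usage sketch (illustrative; `kwargs` and the argument names are hypothetical):
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
# This pops `kwargs["scale"]` when present and warns the caller, and raises once
# the installed diffusers version reaches 1.0.0 so stale shims get cleaned up.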
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a graph given as an adjacency dictionary.
    :param graph: directed graph in dictionary format
    :param start: starting vertex
    :returns: the set of explored vertices
    """
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
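
# Sanity note (illustrative): G above is connected, so starting from "A" the
# search reaches every vertex and depth_first_search(G, "A") returns the full
# vertex set {"A", "B", "C", "D", "E", "F", "G"}.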
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
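
# Rule exercised above (illustrative summary): a checkpoint layout counts as
# safetensors-compatible only if every torch `.bin` weight file has a
# `.safetensors` counterpart; when a `variant` such as "fp16" is passed, the
# variant-suffixed names (e.g. *.fp16.safetensors) are checked as well.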
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
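
# Minimal usage sketch (illustrative; standard transformers config pattern):
#   config = Swinv2Config()  # library defaults shown above
#   config.hidden_size       # 768 == 96 * 2 ** (4 - 1), matching the derivation above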
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
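
# Example of the marker this script enforces (illustrative, hypothetical target):
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps
# The code below such a marker must stay identical to the referenced source,
# optionally rewritten through a trailing "with Old->New" replace pattern.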
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of all almost-equilateral Heronian
    triangles (integer sides a, a, a +/- 1 with integral area) whose perimeter
    does not exceed `max_perimeter`.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
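
# Worked values (illustrative, hand-checked against the recurrence): the triangles
# (5,5,6), (17,17,16), (65,65,66) and (241,241,240) contribute perimeters
# 16, 50, 196 and 722, so solution(1000) returns 16 + 50 + 196 + 722 = 984.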
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list graph data structure that supports directed and undirected
    graphs.
    """

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
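
# Quick usage sketch (illustrative; output checked against the logic above):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}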
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
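
# Design note (illustrative): swapping the module object in sys.modules for a
# _LazyModule defers importing the heavy torch-backed submodules until one of
# the names in _import_structure is first accessed, keeping the top-level
# package import cheap.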
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def A ( self : Optional[int] )-> int:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def A ( self : int )-> Any:
__UpperCamelCase = self.dummy_uncond_unet
__UpperCamelCase = ScoreSdeVeScheduler()
__UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ ).images
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A_ , return_dict=A_ )[
0
]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[Any] )-> List[str]:
__UpperCamelCase = "google/ncsnpp-church-256"
__UpperCamelCase = UNetaDModel.from_pretrained(A_ )
__UpperCamelCase = ScoreSdeVeScheduler.from_pretrained(A_ )
__UpperCamelCase = ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.manual_seed(0 )
__UpperCamelCase = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A_ ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__UpperCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A ( self : Union[str, Any] )-> Tuple:
__UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
__UpperCamelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__UpperCamelCase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
__UpperCamelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__UpperCamelCase = model(A_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , A_ , atol=1e-3 ) )
@slow
def A ( self : List[Any] )-> Union[str, Any]:
__UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
__UpperCamelCase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__UpperCamelCase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
__UpperCamelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__UpperCamelCase = model(A_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , A_ , atol=1e-3 ) )
def simplify(current_set: list[list]) -> list[list]:
    """Divide each row by its leading term, then subtract the first row from the
    others to eliminate the first column; recurse on the reduced system."""
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        final_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n lists of length n+1
    (the coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
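    # Expected results, assuming the elimination above is exact: the symmetric
    # 5x5 system has solution (a, b, c, d, e) = (-1, 0, 1, 2, 3), and [[4, 2]]
    # encodes the single equation 4x = 2, so the second call prints [0.5].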
| 647
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 647
| 1
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : List[Any] = '''1'''
_SCREAMING_SNAKE_CASE : Optional[int] = '''0'''
_SCREAMING_SNAKE_CASE : List[Any] = '''1'''
_SCREAMING_SNAKE_CASE : List[Any] = ort.SessionOptions()
_SCREAMING_SNAKE_CASE : List[str] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
_SCREAMING_SNAKE_CASE : List[str] = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
_SCREAMING_SNAKE_CASE : Any = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
_SCREAMING_SNAKE_CASE : Optional[int] = ort.RunOptions()
_SCREAMING_SNAKE_CASE : Optional[Any] = 128
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : Tuple = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : int = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
_SCREAMING_SNAKE_CASE : int = time.time()
_SCREAMING_SNAKE_CASE : Tuple = 2000
_SCREAMING_SNAKE_CASE : List[Any] = {}
for iter in range(max_iters):
_SCREAMING_SNAKE_CASE : Tuple = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
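
# Note: "model.onnx" above is assumed to be a BERT-style model with three inputs
# (input_ids, attention_mask, token_type_ids) that was exported beforehand, e.g.
# via torch.onnx.export(model, (input_ids, attention_mask, token_type_ids), "model.onnx").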
| 719
|
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return number | (1 << position)
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return number & ~(1 << position)
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return number ^ (1 << position)
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> bool:
return ((number >> position) & 1) == 1
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
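

# A few worked examples (values verified by hand, with number = 0b1010 == 10):
#   set_bit(10, 0)    -> 11  (0b1011)
#   clear_bit(10, 1)  -> 8   (0b1000)
#   flip_bit(10, 3)   -> 2   (0b0010)
#   is_bit_set(10, 1) -> True
#   get_bit(10, 0)    -> 0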
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137
| 0
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
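

# Example of consuming the fixture in a test (a hypothetical test body; the real
# tests live alongside this conftest):
#   def test_dataset_script_is_materialized(dataset_loading_script_dir):
#       assert dataset_loading_script_dir.endswith("__dummy_dataset1__")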
| 22
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
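# A typical invocation, assuming this script is saved as tracking.py:
#   accelerate launch tracking.py --with_tracking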
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 132
| 0
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peak (resonance) IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf IIR filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
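

# Sketch of intended usage (assuming the IIRFilter.process(sample) API from
# audio_filters.iir_filter in this repository):
#   filt = make_lowpass(1000, 48000)
#   filtered = [filt.process(sample) for sample in samples]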
| 271
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'

_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
"""simple docstring"""
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 271
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 538
|
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 538
| 1
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 721
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TF 1.0 model checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
__UpperCamelCase : List[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
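

# Example invocation, assuming the transformers-cli entry point is installed:
#   transformers-cli convert --model_type bert --tf_checkpoint ./model.ckpt \
#       --config ./bert_config.json --pytorch_dump_output ./pytorch_model.bin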
| 106
| 0
|
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits must be multiplied together
    until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits must be summed
    until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
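

# Worked examples (verified by hand):
#   multiplicative_persistence(217) == 2   # 217 -> 2*1*7 = 14 -> 1*4 = 4
#   additive_persistence(199) == 3         # 199 -> 19 -> 10 -> 1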
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/  \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 431
| 0
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the given string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Return the Burrows-Wheeler transform of s together with the index of the
    original string in the sorted rotation list."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse a Burrows-Wheeler transform, recovering the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
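

# Worked example (verified by hand): bwt_transform("^BANANA") returns
# {"bwt_string": "BNN^AAA", "idx_original_string": 6}, and
# reverse_bwt("BNN^AAA", 6) recovers "^BANANA".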
if __name__ == "__main__":
UpperCamelCase__ = "Provide a string that I will generate its BWT transform: "
UpperCamelCase__ = input(entry_msg).strip()
UpperCamelCase__ = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result['bwt_string']}'"""
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
| 548
|
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit (1, 11, 111, ...) divisible
    by ``divisor``, or 0 if no repunit can be divisible by it."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the smallest odd divisor whose least divisible repunit
    is longer than ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 548
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
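# What the lazy pattern above buys (an illustrative note, not part of the file):
# importing `transformers.models.ctrl` only registers `_import_structure`; the
# torch/TF modeling modules are imported on first attribute access, e.g.
#
#     from transformers.models.ctrl import CTRLConfig   # cheap, config only
#     from transformers.models.ctrl import CTRLModel    # triggers the torch import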
| 131
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
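# Minimal shape-check sketch (illustrative; assumes a default CLIPVisionConfig,
# whose 12 hidden layers give (12 + 1) // 5 = 2 mapper blocks of width 768):
#
#     from transformers import CLIPVisionConfig
#     mapper = PaintByExampleMapper(CLIPVisionConfig())
#     hidden = torch.randn(1, 1, 768)   # (batch, one pooled token, hidden)
#     assert mapper(hidden).shape == (1, 1, 768)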
| 131
| 1
|
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5),
    merging the three candidate streams with one pointer each."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_00) = }''')
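if __name__ == "__main__":
    # Hand-checked: the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
    assert ugly_numbers(1) == 1
    assert ugly_numbers(10) == 12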
| 715
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        # one token per UTF-8 byte
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
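# Byte-level sketch (illustrative values, assuming the 3 default special tokens
# pad/eos/unk, so every UTF-8 byte maps to `byte + 3`):
#
#     tokenizer = ByT5Tokenizer()
#     tokenizer._tokenize("hi")                           # ["h", "i"]
#     [tokenizer._convert_token_to_id(t) for t in "hi"]   # [107, 108]  (104 + 3, 105 + 3)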
| 537
| 0
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 595
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 595
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], propagating pending updates lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int):
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
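# Hand-checked expected output for the demo above: the three queries print the
# maxima 7 (positions 4..6), 14 (7..11) and 15 (7..12); after assigning 111 to
# positions 1..3, the full-range query prints 111.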
| 306
|
class SubArray:
    def __init__(self, arr):
        # split the comma-separated input into individual numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Kadane's algorithm: maximum sum of a contiguous sub-array."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # best sum of a sub-array ending at i, then the best seen so far
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(("""the results is:""", re))
| 306
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# NOTE: class and parameter names below are reconstructed from the call sites in
# the body; the source had lost them. `latents_reference` is inferred from its
# use further down and may differ from the upstream community pipeline.
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Stable Diffusion pipeline that keeps images visually similar across sizes for a fixed seed."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ :Union[str, Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ :int = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
# get prompt text embeddings
UpperCamelCase__ :Dict = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase__ :Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase__ :Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
UpperCamelCase__ :Any = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase__ :List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = text_embeddings.shape
UpperCamelCase__ :str = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase__ :Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ :Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ :List[str] = 42
if negative_prompt is None:
UpperCamelCase__ :str = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !="""
f""" {type(SCREAMING_SNAKE_CASE_ )}.""" )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ :Optional[Any] = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
UpperCamelCase__ :List[str] = negative_prompt
UpperCamelCase__ :Union[str, Any] = text_input_ids.shape[-1]
UpperCamelCase__ :Dict = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase__ :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ :Union[str, Any] = uncond_embeddings.shape[1]
UpperCamelCase__ :Dict = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase__ :int = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ :List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ :Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ :int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase__ :List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase__ :int = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
UpperCamelCase__ :Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase__ :int = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ :List[str] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCamelCase__ :Dict = latents_reference.to(self.device )
UpperCamelCase__ :Optional[Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase__ :str = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase__ :int = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase__ :Optional[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase__ :Optional[int] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase__ :str = 0 if dx < 0 else dx
UpperCamelCase__ :Tuple = 0 if dy < 0 else dy
UpperCamelCase__ :List[str] = max(-dx , 0 )
UpperCamelCase__ :List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCamelCase__ :Tuple = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase__ :Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ :List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ :str = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ :Any = {}
if accepts_eta:
UpperCamelCase__ :Any = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ :List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ :int = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase__ :Dict = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = noise_pred.chunk(2 )
UpperCamelCase__ :str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ :int = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ :Dict = 1 / 0.1_8215 * latents
UpperCamelCase__ :List[str] = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase__ :Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase__ :List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase__ :Optional[int] = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase__ , UpperCamelCase__ :Any = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase__ :List[str] = None
if output_type == "pil":
UpperCamelCase__ :int = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
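# Usage sketch (assumptions: the reconstructed class name above and the standard
# diffusers loading convention; the checkpoint id is illustrative):
#
#     pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     generator = torch.Generator("cuda").manual_seed(0)
#     image_512 = pipe("an astronaut", height=512, width=512, generator=generator).images[0]
#     # the same seed at a larger size should stay visually close to image_512
#     image_768 = pipe("an astronaut", height=768, width=768, generator=generator).images[0]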
| 45
|
'''simple docstring'''
import pprint
import requests
A_ = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
A_ = random_quotes()
pprint.pprint(response)
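# Shape of the response (based on the public zenquotes API; treat as illustrative):
# a list of quote objects such as
#
#     [{"q": "<quote text>", "a": "<author>", "h": "<HTML rendering>"}]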
| 42
| 0
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Number of lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n), read off the middle of row 2n of Pascal's triangle."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
| 705
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ :List[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings) -> bool:
    """Check whether any variant name in `attributes` is used in the modeling sources."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class) -> list:
    """Return the sorted list of `__init__` parameters of `config_class` that are never used."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes() -> None:
    """Raise a ValueError listing every configuration class with unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 355
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page-replacement cache using least-recently-used eviction."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # evict the least recently used key (the tail of the deque)
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 562
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 633
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 633
| 1
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
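# Worked example for the helper above (hand-checked): a 480x640 image resized
# toward 384x384 with keep_aspect_ratio=True and multiple=32 fits the height
# (scale 0.8) and snaps both sides to multiples of 32:
#
#     get_resize_output_image_size(image, (384, 384), keep_aspect_ratio=True, multiple=32)
#     # -> (384, 512)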
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
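
# A minimal sketch of how the post-processing above is typically driven. The dummy
# logits shape (batch, num_labels, h, w) and the 480x640 target size are assumptions
# chosen for illustration; they are not taken from this module.
def _demo_post_process_semantic_segmentation():
    import torch
    logits = torch.randn(2 , 150 , 24 , 24 )  # dummy model logits
    # Upsample each logit map to its target size, then argmax over the label axis.
    return [
        torch.nn.functional.interpolate(
            logits[idx].unsqueeze(dim=0 ) , size=(480, 640) , mode="""bilinear""" , align_corners=False )[0].argmax(dim=0 )
        for idx in range(logits.shape[0] )
    ]  # one (480, 640) tensor of class ids per image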
| 5
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__magic_name__ = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
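
# A minimal sketch of the mechanism `_LazyModule` relies on: PEP 562 lets a package
# resolve attributes on first access, so the heavy submodule import above only happens
# when a name is actually used. The mapping below is an assumption for the demo and is
# not the real `_LazyModule` implementation.
def _demo_lazy_getattr(name):
    import importlib

    _lazy_map = {"""LongT5Config""": """.configuration_longt5"""}  # assumed name -> submodule map
    if name in _lazy_map:
        module = importlib.import_module(_lazy_map[name] , package=__name__ )
        return getattr(module , name )
    raise AttributeError(F"module {__name__!r} has no attribute {name!r}" )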
| 250
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    '''simple docstring'''

    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    '''simple docstring'''

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'CrossAttnDownBlock2D',
        'DownBlock2D',
    )
    up_block_types: Tuple[str, ...] = ('UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D')
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng ) -> FrozenDict:
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ) -> None:
"""simple docstring"""
_lowerCamelCase = self.block_out_channels
_lowerCamelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCamelCase = self.num_attention_heads or self.attention_head_dim
# input
_lowerCamelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_lowerCamelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_lowerCamelCase = FlaxTimestepEmbedding(A_ , dtype=self.dtype )
_lowerCamelCase = self.only_cross_attention
if isinstance(A_ , A_ ):
_lowerCamelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(A_ , A_ ):
_lowerCamelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_lowerCamelCase = []
_lowerCamelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = block_out_channels[i]
_lowerCamelCase = i == len(A_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCamelCase = FlaxCrossAttnDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxDownBlockaD(
in_channels=A_ , out_channels=A_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(A_ )
_lowerCamelCase = down_blocks
# mid
_lowerCamelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_lowerCamelCase = []
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = list(reversed(A_ ) )
_lowerCamelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_lowerCamelCase = output_channel
_lowerCamelCase = reversed_block_out_channels[i]
_lowerCamelCase = reversed_block_out_channels[min(i + 1 , len(A_ ) - 1 )]
_lowerCamelCase = i == len(A_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowerCamelCase = FlaxCrossAttnUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowerCamelCase = FlaxUpBlockaD(
in_channels=A_ , out_channels=A_ , prev_output_channel=A_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(A_ )
_lowerCamelCase = output_channel
_lowerCamelCase = up_blocks
# out
_lowerCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
_lowerCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_ , A_=None , A_=None , A_ = True , A_ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
# 1. time
if not isinstance(A_ , jnp.ndarray ):
_lowerCamelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(A_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps.astype(dtype=jnp.floataa )
_lowerCamelCase = jnp.expand_dims(A_ , 0 )
_lowerCamelCase = self.time_proj(A_ )
_lowerCamelCase = self.time_embedding(A_ )
# 2. pre-process
_lowerCamelCase = jnp.transpose(A_ , (0, 2, 3, 1) )
_lowerCamelCase = self.conv_in(A_ )
# 3. down
_lowerCamelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(A_ , A_ ):
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , A_ , deterministic=not train )
else:
_lowerCamelCase , _lowerCamelCase = down_block(A_ , A_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowerCamelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
A_ , A_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowerCamelCase = new_down_block_res_samples
# 4. mid
_lowerCamelCase = self.mid_block(A_ , A_ , A_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowerCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_lowerCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(A_ , A_ ):
_lowerCamelCase = up_block(
A_ , temb=A_ , encoder_hidden_states=A_ , res_hidden_states_tuple=A_ , deterministic=not train , )
else:
_lowerCamelCase = up_block(A_ , temb=A_ , res_hidden_states_tuple=A_ , deterministic=not train )
# 6. post-process
_lowerCamelCase = self.conv_norm_out(A_ )
_lowerCamelCase = nn.silu(A_ )
_lowerCamelCase = self.conv_out(A_ )
_lowerCamelCase = jnp.transpose(A_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=A_ )
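
# A minimal sketch of the sinusoidal embedding that `FlaxTimesteps` produces before
# `FlaxTimestepEmbedding`'s MLP in the `__call__` above. The dimension of 32 and the
# textbook formulation are assumptions for illustration, not the exact diffusers code.
def _demo_timestep_embedding(timesteps , dim=32 , max_period=10000.0 ):
    half = dim // 2
    freqs = jnp.exp(-jnp.log(max_period ) * jnp.arange(half ) / half )  # geometric frequencies
    args = jnp.asarray(timesteps , dtype=jnp.float32 )[:, None] * freqs[None, :]
    return jnp.concatenate([jnp.sin(args ), jnp.cos(args )] , axis=-1 )  # shape (batch, dim)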
| 713
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
'''simple docstring'''
def __init__( self , A_ = "cpu" , A_ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
_lowerCamelCase = device
_lowerCamelCase = CLIPTokenizerFast.from_pretrained(A_ )
_lowerCamelCase = [0.48145466, 0.4578275, 0.40821073]
_lowerCamelCase = [0.26862954, 0.26130258, 0.27577711]
_lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCamelCase = torchvision.transforms.Resize(2_24 )
_lowerCamelCase = torchvision.transforms.CenterCrop(2_24 )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.resize(A_ )
_lowerCamelCase = self.center_crop(A_ )
_lowerCamelCase = self.normalize(A_ )
return images
def __call__( self , A_=None , A_=None , **A_ ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.tokenizer(text=A_ , **A_ )
_lowerCamelCase = self.preprocess_img(A_ )
_lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP ( nn.Module ):
'''simple docstring'''
def __init__( self , A_=10 , A_=0.01 , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , A_=True , A_="image" , A_=True , A_=False , A_=False , A_=False , ) -> None:
"""simple docstring"""
super().__init__()
_lowerCamelCase = None
_lowerCamelCase = device if device else get_device()
if vqgan:
_lowerCamelCase = vqgan
else:
_lowerCamelCase = load_vqgan(self.device , conf_path=A_ , ckpt_path=A_ )
self.vqgan.eval()
if clip:
_lowerCamelCase = clip
else:
_lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_lowerCamelCase = ProcessorGradientFlow(device=self.device )
_lowerCamelCase = iterations
_lowerCamelCase = lr
_lowerCamelCase = log
_lowerCamelCase = make_grid
_lowerCamelCase = return_val
_lowerCamelCase = quantize
_lowerCamelCase = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self , A_=None , A_=None , A_=5 , A_=True ) -> Any:
"""simple docstring"""
_lowerCamelCase = []
if output_path is None:
_lowerCamelCase = '''./animation.gif'''
if input_path is None:
_lowerCamelCase = self.save_path
_lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(A_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_lowerCamelCase = total_duration / len(A_ )
_lowerCamelCase = [frame_duration] * len(A_ )
if extend_frames:
_lowerCamelCase = 1.5
_lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A_ ) )
imageio.mimsave(A_ , A_ , duration=A_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase_ ( self , A_=None , A_=None ) -> Union[str, Any]:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_lowerCamelCase = preprocess(Image.open(A_ ) , target_image_size=2_56 ).to(self.device )
_lowerCamelCase = preprocess_vqgan(A_ )
_lowerCamelCase , *_lowerCamelCase = self.vqgan.encode(A_ )
return z
def UpperCamelCase_ ( self , A_ ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.latent.detach().requires_grad_()
_lowerCamelCase = base_latent + transform_vector
if self.quantize:
_lowerCamelCase , *_lowerCamelCase = self.vqgan.quantize(A_ )
else:
_lowerCamelCase = trans_latent
return self.vqgan.decode(A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_=None ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.clip_preprocessor(text=A_ , images=A_ , return_tensors='''pt''' , padding=A_ )
_lowerCamelCase = self.clip(**A_ )
_lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
_lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
_lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , A_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , A_ , weights=neg_prompts['''weights'''] )
else:
_lowerCamelCase = torch.tensor([1] , device=self.device )
_lowerCamelCase = -torch.log(A_ ) + torch.log(A_ )
return loss
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
_lowerCamelCase = torch.randn_like(self.latent , requires_grad=A_ , device=self.device )
_lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCamelCase = self._add_vector(A_ )
_lowerCamelCase = loop_post_process(A_ )
_lowerCamelCase = self._get_CLIP_loss(A_ , A_ , A_ )
print('''CLIP loss''' , A_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
wandb.init(reinit=A_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_lowerCamelCase = Image.open(A_ )
_lowerCamelCase = image.resize((2_56, 2_56) )
            wandb.log({'''Original Image''': wandb.Image(A_ )} )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
if not prompts:
return []
_lowerCamelCase = []
_lowerCamelCase = []
if isinstance(A_ , A_ ):
_lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A_ , (tuple, list) ):
_lowerCamelCase = prompt[0]
_lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
_lowerCamelCase , _lowerCamelCase = prompt.split(''':''' )
_lowerCamelCase = float(A_ )
else:
_lowerCamelCase = prompt
_lowerCamelCase = 1.0
processed_prompts.append(A_ )
weights.append(A_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A_ , device=self.device ),
}
def UpperCamelCase_ ( self , A_ , A_=None , A_=None , A_=True , A_=False , A_=True , A_=True , A_=None , ) -> str:
"""simple docstring"""
if image_path:
_lowerCamelCase = self._get_latent(A_ )
else:
_lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A_ , A_ , A_ )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCamelCase = self.process_prompts(A_ )
_lowerCamelCase = self.process_prompts(A_ )
if save_final and save_path is None:
_lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A_ ):
os.makedirs(A_ )
else:
_lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(A_ )
_lowerCamelCase = save_path
_lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A_ ) )
_lowerCamelCase = loop_post_process(A_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(A_ , A_ , A_ ) ):
if show_intermediate:
show_pil(A_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A_ )} )
if show_final:
show_pil(A_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
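
# A simplified standalone mirror of the prompt syntax that `process_prompts` above
# accepts: prompts are '|'-separated, with an optional ':weight' suffix per prompt
# (weight defaults to 1.0). The helper name is an illustrative assumption.
def _demo_parse_prompts(prompts ):
    processed , weights = [] , []
    for prompt in (p.strip() for p in prompts.split('''|''' )):
        if ":" in prompt:
            text , weight = prompt.split(''':''' )
            processed.append(text )
            weights.append(float(weight ) )
        else:
            processed.append(prompt )
            weights.append(1.0 )
    return processed , weights
# _demo_parse_prompts("blue eyes:1.5|blonde hair") == (["blue eyes", "blonde hair"], [1.5, 1.0])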
| 638
| 0
|
"""simple docstring"""
import operator as op
def solve ( post_fix : list ) ->int:
    '''simple docstring'''
    stack = []
    div = lambda x ,y : int(x / y ) # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    } # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8 ) ,'''Action'''.center(12 ) ,'''Stack''' ,sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit(): # if x is a digit
            stack.append(x ) # append x to stack
            # output in tabular format
            print(x.rjust(8 ) ,('''push(''' + x + ''')''').ljust(12 ) ,''','''.join(stack ) ,sep=''' | ''' )
        else:
            b = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) ,('''pop(''' + b + ''')''').ljust(12 ) ,''','''.join(stack ) ,sep=''' | ''' )
            a = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) ,('''pop(''' + a + ''')''').ljust(12 ) ,''','''.join(stack ) ,sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) ,int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) ,('''push(''' + a + x + b + ''')''').ljust(12 ) ,''','''.join(stack ) ,sep=''' | ''' ,)
    return int(stack[0] )
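
# A small worked example of the evaluator above; it is an illustration added for
# clarity, not a test from the original file.
def _demo_solve():
    # "2 3 1 * + 9 -": 3*1 = 3, then 2+3 = 5, then 5-9 = -4
    # (the right operand is popped first, then the left one).
    assert solve("""2 3 1 * + 9 -""".split(""" """ ) ) == -4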
if __name__ == "__main__":
    a_ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
    print("""\n\tResult = """, solve(a_))
| 177
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = KandinskyVaaPipeline
    params = [
        """image_embeds""",
        """negative_image_embeds""",
    ]
    batch_params = ["""image_embeds""", """negative_image_embeds"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 100
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Optional[Any] = {
'''in_channels''': 4,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__A : Optional[Any] = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_unet
__A : Optional[int] = self.dummy_movq
__A : List[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCamelCase , )
__A : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=0 ):
'''simple docstring'''
__A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
__A : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
__A : Optional[Any] = torch.manual_seed(__lowerCamelCase )
else:
__A : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__A : Dict = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = '''cpu'''
__A : str = self.get_dummy_components()
__A : Dict = self.pipeline_class(**__lowerCamelCase )
__A : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
__A : Tuple = output.images
__A : Tuple = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
__A : Dict = image[0, -3:, -3:, -1]
__A : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A : Optional[int] = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__A : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
__A : int = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__A : Union[str, Any] = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[Any] = '''red cat, 4k photo'''
__A : Union[str, Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__A , __A : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__A : Any = torch.Generator(device='''cuda''' ).manual_seed(0 )
__A : Optional[int] = pipeline(
image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , output_type='''np''' , )
__A : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 177
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
def __init__( self : Union[str, Any] , _snake_case : List[str]=26_7735 , _snake_case : str=[2_0000, 4_0000, 20_0000] , _snake_case : List[str]=1024 , _snake_case : str=1024 , _snake_case : Optional[int]=16 , _snake_case : Union[str, Any]=64 , _snake_case : Tuple=4096 , _snake_case : Tuple=4 , _snake_case : Union[str, Any]=False , _snake_case : List[Any]=18 , _snake_case : int=1600 , _snake_case : List[str]=1000 , _snake_case : Union[str, Any]=True , _snake_case : List[Any]=True , _snake_case : Dict=0 , _snake_case : Any=-1 , _snake_case : Optional[int]=True , _snake_case : Dict=0.1 , _snake_case : Any=0.0 , _snake_case : Tuple=True , _snake_case : List[Any]="normal" , _snake_case : Optional[int]=0.01 , _snake_case : str=0.01 , _snake_case : Optional[Any]=0.02 , _snake_case : Optional[Any]=1E-5 , _snake_case : Dict=0 , **_snake_case : Optional[Any] , ):
__lowercase : Optional[int] = vocab_size
__lowercase : int = []
self.cutoffs.extend(_snake_case )
if proj_share_all_but_first:
__lowercase : Dict = [False] + [True] * len(self.cutoffs )
else:
__lowercase : Tuple = [False] + [False] * len(self.cutoffs )
__lowercase : int = d_model
__lowercase : Optional[int] = d_embed
__lowercase : Dict = d_head
__lowercase : List[Any] = d_inner
__lowercase : Tuple = div_val
__lowercase : Optional[Any] = pre_lnorm
__lowercase : List[Any] = n_layer
__lowercase : int = n_head
__lowercase : Any = mem_len
__lowercase : Any = same_length
__lowercase : Dict = attn_type
__lowercase : Dict = clamp_len
__lowercase : Tuple = sample_softmax
__lowercase : List[str] = adaptive
__lowercase : Any = dropout
__lowercase : Dict = dropatt
__lowercase : Optional[int] = untie_r
__lowercase : Dict = init
__lowercase : Optional[int] = init_range
__lowercase : Optional[int] = proj_init_std
__lowercase : Tuple = init_std
__lowercase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=_snake_case , **_snake_case )
@property
    def max_position_embeddings( self : Optional[Any] ):
# Message copied from Transformer-XL documentation
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : str , _snake_case : Optional[int] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
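
# A minimal illustration of the adaptive-softmax bookkeeping in `__init__` above,
# assuming the default cutoffs; the helper is a hand-computed demo, not module state.
def _demo_proj_share_mask():
    cutoffs = [2_0000, 4_0000, 20_0000]
    # With proj_share_all_but_first=True the first cluster keeps its own projection
    # and every later cluster shares one:
    tie_projs = [False] + [True] * len(cutoffs )
    assert tie_projs == [False, True, True, True]
    return tie_projs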
| 284
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp( self : Any ):
super().setUp()
__lowercase : int = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self : Dict , **_snake_case : Any ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self : Any ):
__lowercase : Dict = self.get_rust_tokenizer()
__lowercase : Optional[int] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
__lowercase : Union[str, Any] = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
__lowercase : Optional[int] = tokenizer.batch_encode_plus(_snake_case )['''input_ids''']
self.assertListEqual(_snake_case , _snake_case )
__lowercase : List[Any] = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : str , _snake_case : int=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowercase : List[Any] = '''This is a simple input'''
__lowercase : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
__lowercase : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
__lowercase : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(_snake_case , max_length=_snake_case )
tokenizer_r.encode_plus(_snake_case , max_length=_snake_case )
tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
tokenizer_r.encode(_snake_case , max_length=_snake_case )
tokenizer_r.batch_encode_plus(_snake_case , max_length=_snake_case )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Simple input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' )
# Pair input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
def snake_case_ ( self : Optional[int] ):
__lowercase : List[Any] = self.get_rust_tokenizer()
__lowercase : int = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=_snake_case )
__lowercase : Optional[Any] = next(iter(_snake_case ) )['''premise'''] # pick up one data
__lowercase : List[str] = list(sample_data.values() )
__lowercase : List[str] = list(map(tokenizer.encode , _snake_case ) )
__lowercase : Tuple = [tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case ) for x in output_tokens]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : List[Any] ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 284
| 1
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer ( nn.Module ):
def __init__( self : str , __snake_case : int , __snake_case : int , __snake_case : int = 3 , __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : Optional[str] = "relu" , ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase = nn.Convad(
__snake_case , __snake_case , kernel_size=__snake_case , stride=__snake_case , padding=kernel_size // 2 , groups=__snake_case , bias=__snake_case , )
_lowerCAmelCase = nn.BatchNormad(__snake_case )
_lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase__ ( self : List[Any] , __snake_case : Dict ) -> List[str]:
_lowerCAmelCase = self.convolution(__snake_case )
_lowerCAmelCase = self.normalization(__snake_case )
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class RegNetEmbeddings ( nn.Module ):
def __init__( self : Dict , __snake_case : RegNetConfig ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_lowerCAmelCase = config.num_channels
def lowercase__ ( self : Union[str, Any] , __snake_case : Union[str, Any] ) -> Optional[Any]:
_lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
_lowerCAmelCase = self.embedder(__snake_case )
return hidden_state
class RegNetShortCut ( nn.Module ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int = 2 ) -> Optional[int]:
super().__init__()
_lowerCAmelCase = nn.Convad(__snake_case , __snake_case , kernel_size=1 , stride=__snake_case , bias=__snake_case )
_lowerCAmelCase = nn.BatchNormad(__snake_case )
def lowercase__ ( self : int , __snake_case : Tensor ) -> Tensor:
_lowerCAmelCase = self.convolution(__snake_case )
_lowerCAmelCase = self.normalization(__snake_case )
return hidden_state
class RegNetSELayer ( nn.Module ):
def __init__( self : Optional[int] , __snake_case : int , __snake_case : int ) -> int:
super().__init__()
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
_lowerCAmelCase = nn.Sequential(
nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.ReLU() , nn.Convad(__snake_case , __snake_case , kernel_size=1 ) , nn.Sigmoid() , )
def lowercase__ ( self : List[str] , __snake_case : List[str] ) -> Optional[Any]:
# b c h w -> b c 1 1
_lowerCAmelCase = self.pooler(__snake_case )
_lowerCAmelCase = self.attention(__snake_case )
_lowerCAmelCase = hidden_state * attention
return hidden_state
class RegNetXLayer ( nn.Module ):
def __init__( self : Tuple , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 ) -> Optional[int]:
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def lowercase__ ( self : int , __snake_case : str ) -> List[Any]:
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(__snake_case )
_lowerCAmelCase = self.shortcut(__snake_case )
hidden_state += residual
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class RegNetYLayer ( nn.Module ):
def __init__( self : str , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 1 ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase = in_channels != out_channels or stride != 1
_lowerCAmelCase = max(1 , out_channels // config.groups_width )
_lowerCAmelCase = (
RegNetShortCut(__snake_case , __snake_case , stride=__snake_case ) if should_apply_shortcut else nn.Identity()
)
_lowerCAmelCase = nn.Sequential(
RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__snake_case , __snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act ) , RegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__snake_case , __snake_case , kernel_size=1 , activation=__snake_case ) , )
_lowerCAmelCase = ACTaFN[config.hidden_act]
def lowercase__ ( self : Optional[int] , __snake_case : Optional[Any] ) -> Union[str, Any]:
_lowerCAmelCase = hidden_state
_lowerCAmelCase = self.layer(__snake_case )
_lowerCAmelCase = self.shortcut(__snake_case )
hidden_state += residual
_lowerCAmelCase = self.activation(__snake_case )
return hidden_state
class RegNetStage ( nn.Module ):
def __init__( self : List[Any] , __snake_case : RegNetConfig , __snake_case : int , __snake_case : int , __snake_case : int = 2 , __snake_case : int = 2 , ) -> str:
super().__init__()
_lowerCAmelCase = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__snake_case , __snake_case , __snake_case , stride=__snake_case , ) , *[layer(__snake_case , __snake_case , __snake_case ) for _ in range(depth - 1 )] , )
def lowercase__ ( self : str , __snake_case : Tuple ) -> List[str]:
_lowerCAmelCase = self.layers(__snake_case )
return hidden_state
class RegNetEncoder ( nn.Module ):
def __init__( self : Optional[int] , __snake_case : RegNetConfig ) -> int:
super().__init__()
_lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__snake_case , config.depths[1:] ):
self.stages.append(RegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case ) )
def lowercase__ ( self : Optional[int] , __snake_case : Tensor , __snake_case : bool = False , __snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
_lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
_lowerCAmelCase = stage_module(__snake_case )
if output_hidden_states:
_lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
class RegNetPreTrainedModel ( PreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
def lowercase__ ( self : Union[str, Any] , __snake_case : Optional[Any] ) -> Tuple:
if isinstance(__snake_case , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase__ ( self : Tuple , __snake_case : List[Any] , __snake_case : Any=False ) -> str:
if isinstance(__snake_case , __snake_case ):
_lowerCAmelCase = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel ( RegNetPreTrainedModel ):
def __init__( self : int , __snake_case : Optional[int] ) -> Union[str, Any]:
super().__init__(__snake_case )
_lowerCAmelCase = config
_lowerCAmelCase = RegNetEmbeddings(__snake_case )
_lowerCAmelCase = RegNetEncoder(__snake_case )
_lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self : Union[str, Any] , __snake_case : Tensor , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.embedder(__snake_case )
_lowerCAmelCase = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(__snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification ( RegNetPreTrainedModel ):
def __init__( self : List[Any] , __snake_case : int ) -> str:
super().__init__(__snake_case )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = RegNetModel(__snake_case )
# classification head
_lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self : List[str] , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.LongTensor] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
_lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase = self.regnet(__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case )
_lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase = self.classifier(__snake_case )
_lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase = """single_label_classification"""
else:
_lowerCAmelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
_lowerCAmelCase = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase = loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase = BCEWithLogitsLoss()
_lowerCAmelCase = loss_fct(__snake_case , __snake_case )
if not return_dict:
_lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
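
# A minimal numeric sketch of the squeeze-and-excitation gating used by
# `RegNetSELayer` above: global-average-pool to (b, c, 1, 1), squeeze and expand
# through a 1x1-conv bottleneck, sigmoid, then rescale the channels. The channel
# sizes are illustrative assumptions.
def _demo_se_gate():
    x = torch.randn(1 , 8 , 7 , 7 )
    pooled = nn.AdaptiveAvgPool2d((1, 1) )(x )  # b c h w -> b c 1 1
    attention = nn.Sequential(
        nn.Conv2d(8 , 2 , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(2 , 8 , kernel_size=1 ) , nn.Sigmoid() , )(pooled )
    return x * attention  # per-channel gates in (0, 1); output shape equals x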
| 207
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self : Dict ):
super().setUp()
_lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowercase__ ( self : Tuple , __snake_case : str ) -> List[str]:
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
_lowerCAmelCase = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]

        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
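# For reference, the WordPiece behaviour exercised in test_wordpiece_tokenizer
# above is greedy longest-prefix matching: each word is consumed from the left
# by the longest matching vocabulary entry (non-initial pieces carry a "##"
# prefix), and a word with no matching prefix collapses to the unknown token.
# A minimal sketch of that loop (an illustration, not the `transformers`
# implementation):
def wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur_piece = None
        while start < end:  # try the longest remaining prefix first
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                cur_piece = candidate
                break
            end -= 1
        if cur_piece is None:
            return [unk_token]  # nothing matched: the whole word is unknown
        pieces.append(cur_piece)
        start = end
    return pieces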
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
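# The create_and_check_decoder_model_past_large_inputs check above rests on a
# simple invariant: decoding new tokens against a `past_key_values` cache must
# reproduce the hidden states of one full forward pass over the concatenated
# sequence. Schematically (a sketch of the idea, not the exact test code):
#
#     past = model(ids, use_cache=True).past_key_values
#     step = model(next_ids, past_key_values=past).last_hidden_state
#     full = model(torch.cat([ids, next_ids], dim=-1)).last_hidden_state
#     torch.testing.assert_close(step, full[:, -next_ids.shape[1]:])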
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
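# Background for the rope-scaling test above: "linear" position interpolation
# divides every position index by the scaling factor before the rotary angles
# are computed, so even short inputs see different embeddings, while "dynamic"
# NTK scaling leaves positions untouched until the input exceeds the original
# max_position_embeddings. A minimal sketch of the linear variant (an
# illustration of the idea, not the Hugging Face module):
def linear_scaled_rope_angles(position: int, dim: int, factor: float, base: float = 10000.0) -> list:
    # theta_j = (position / factor) * base^(-2j / dim), one angle per rotary pair j
    return [(position / factor) * base ** (-2 * j / dim) for j in range(dim // 2)]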
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes) -> list[int]:
    """Calculate the waiting time of each process using shortest-remaining-time-first
    (preemptive shortest-job-first) scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
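# Worked example, hand-traced through the loop above: with arrival times
# [0, 1, 2] and burst times [3, 1, 2], the second process preempts the first
# at t=1 (remaining time 1 < 2), the first finishes at t=4, and the third
# runs last, so:
#
#     calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)  ->  [1, 0, 2]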
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time) -> list[int]:
    """Turnaround time of a process is its burst time plus its waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes) -> None:
    """Print the average waiting time and average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
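# A minimal usage sketch for the pipeline exported above (the checkpoint names
# are illustrative; any compatible ControlNet / Stable Diffusion pair works):
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#     )
#     image = pipe("a photo of a cat", image=canny_edge_map).images[0]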
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
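# Pattern semantics, illustrated against the S2T ignore list defined above:
#
#     should_ignore("encoder.proj.weight", IGNORE_KEYS_S2T)               # True: plain substring "encoder.proj"
#     should_ignore("text_encoder_prenet.embed_tokens", IGNORE_KEYS_S2T)  # True: prefix pattern "text_encoder_prenet.*"
#     should_ignore("encoder.layers.0.fc1.weight", IGNORE_KEYS_S2T)       # False: no pattern matches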
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)

                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
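# Example invocation (the script filename and the paths are illustrative):
#
#     python convert_speecht5_checkpoint.py \
#         --task s2t \
#         --checkpoint_path /path/to/speecht5_asr.pt \
#         --vocab_path /path/to/spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_s2t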
def valid_coloring(neighbours, colored_vertices, color):
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
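if __name__ == "__main__":
    # Demo: a triangle (K3) is 3-chromatic, so it can be colored with three
    # colors but not with two (in which case `color` returns an empty list).
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # []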