| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
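Each row pairs a `code` sample with a `style_context` sample plus their integer style ids and a binary label. A minimal sketch of consuming rows with this schema through the `datasets` library; the dataset identifier below is a hypothetical placeholder, not the real repository name:

from datasets import load_dataset

# Hypothetical dataset id; substitute the actual one.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample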
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 41 |
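As an aside, a toy stand-in for the lazy-import pattern the sample above relies on (a sketch only, not the real `transformers._LazyModule`): attribute access triggers the underlying import, so importing the package stays cheap.

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy lazy module: each exported symbol is imported on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the module that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        try:
            module_name = self._symbol_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}))  # json is only imported here
print(lazy.sqrt(2))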
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 41 | label: 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 41 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| style_context_codestyle: 41 | label: 1 |
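A few quick sanity checks for the `solution` function in the sample above (F(7) = 13 is the first two-digit Fibonacci term, F(12) = 144 the first three-digit one, and 4782 is the well-known Project Euler 25 answer for 1000 digits):

assert solution(2) == 7          # F(7) = 13
assert solution(3) == 12         # F(12) = 144
assert solution(1_000) == 4_782  # Project Euler 25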
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| code_codestyle: 41 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us how often (every how many encoder layers) to place a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # Same for the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| style_context_codestyle: 41 | label: 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
def __init__( self , lowerCAmelCase_ , ) -> int:
"""simple docstring"""
a_ =parent
a_ =1_3
a_ =7
a_ =True
a_ =True
a_ =False
a_ =True
a_ =9_9
a_ =3_2
a_ =2
a_ =4
a_ =3_7
a_ ="gelu"
a_ =0.1
a_ =0.1
a_ =5_1_2
a_ =1_6
a_ =2
a_ =0.0_2
a_ =3
a_ =4
a_ =None
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ =None
if self.use_input_mask:
a_ =random_attention_mask([self.batch_size, self.seq_length])
a_ =None
a_ =None
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ =ids_tensor([self.batch_size] , self.num_choices)
a_ =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =TFDistilBertModel(config=lowerCAmelCase_)
a_ ={"input_ids": input_ids, "attention_mask": input_mask}
a_ =model(lowerCAmelCase_)
a_ =[input_ids, input_mask]
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =TFDistilBertForMaskedLM(config=lowerCAmelCase_)
a_ ={"input_ids": input_ids, "attention_mask": input_mask}
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =TFDistilBertForQuestionAnswering(config=lowerCAmelCase_)
a_ ={
"input_ids": input_ids,
"attention_mask": input_mask,
}
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =self.num_labels
a_ =TFDistilBertForSequenceClassification(lowerCAmelCase_)
a_ ={"input_ids": input_ids, "attention_mask": input_mask}
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =self.num_choices
a_ =TFDistilBertForMultipleChoice(lowerCAmelCase_)
a_ =tf.tile(tf.expand_dims(lowerCAmelCase_ , 1) , (1, self.num_choices, 1))
a_ =tf.tile(tf.expand_dims(lowerCAmelCase_ , 1) , (1, self.num_choices, 1))
a_ ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =self.num_labels
a_ =TFDistilBertForTokenClassification(lowerCAmelCase_)
a_ ={"input_ids": input_ids, "attention_mask": input_mask}
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.prepare_config_and_inputs()
((a_) , (a_) , (a_) , (a_) , (a_) , (a_)) =config_and_inputs
a_ ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =TFDistilBertModelTester(self)
a_ =ConfigTester(self , config_class=lowerCAmelCase_ , dim=3_7)
def lowercase_ ( self) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_)
@slow
def lowercase_ ( self) -> Any:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
a_ =TFDistilBertModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@require_tf
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| code_codestyle: 41 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start, 500)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| style_context_codestyle: 41 | label: 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Tokenizes text and preprocesses images for CLIP, keeping the image transforms differentiable."""
def __init__( self , lowerCAmelCase_ = "cpu" , lowerCAmelCase_ = "openai/clip-vit-large-patch14") -> None:
"""simple docstring"""
a_ =device
a_ =CLIPTokenizerFast.from_pretrained(lowerCAmelCase_)
a_ =[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
a_ =[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
a_ =torchvision.transforms.Normalize(self.image_mean , self.image_std)
a_ =torchvision.transforms.Resize(2_2_4)
a_ =torchvision.transforms.CenterCrop(2_2_4)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =self.resize(lowerCAmelCase_)
a_ =self.center_crop(lowerCAmelCase_)
a_ =self.normalize(lowerCAmelCase_)
return images
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =self.tokenizer(text=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =self.preprocess_img(lowerCAmelCase_)
a_ ={key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP(nn.Module):
def __init__( self , lowerCAmelCase_=1_0 , lowerCAmelCase_=0.0_1 , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="image" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> None:
"""simple docstring"""
super().__init__()
a_ =None
a_ =device if device else get_device()
if vqgan:
a_ =vqgan
else:
a_ =load_vqgan(self.device , conf_path=lowerCAmelCase_ , ckpt_path=lowerCAmelCase_)
self.vqgan.eval()
if clip:
a_ =clip
else:
a_ =CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
self.clip.to(self.device)
a_ =ProcessorGradientFlow(device=self.device)
a_ =iterations
a_ =lr
a_ =log
a_ =make_grid
a_ =return_val
a_ =quantize
a_ =self.vqgan.decoder.z_shape
def lowercase_ ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=5 , lowerCAmelCase_=True) -> Any:
"""simple docstring"""
a_ =[]
if output_path is None:
a_ ="./animation.gif"
if input_path is None:
a_ =self.save_path
a_ =sorted(glob(input_path + "/*"))
if not len(lowerCAmelCase_):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)")
if len(lowerCAmelCase_) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
a_ =total_duration / len(lowerCAmelCase_)
a_ =[frame_duration] * len(lowerCAmelCase_)
if extend_frames:
a_ =1.5
a_ =3
for file_name in paths:
if file_name.endswith(".png"):
images.append(imageio.imread(lowerCAmelCase_))
imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_)
print(f"""gif saved to {output_path}""")
def lowercase_ ( self , lowerCAmelCase_=None , lowerCAmelCase_=None) -> str:
"""simple docstring"""
if not (path or img):
raise ValueError("Input either path or tensor")
if img is not None:
raise NotImplementedError
a_ =preprocess(Image.open(lowerCAmelCase_) , target_image_size=2_5_6).to(self.device)
a_ =preprocess_vqgan(lowerCAmelCase_)
a_ , *a_ =self.vqgan.encode(lowerCAmelCase_)
return z
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =self.latent.detach().requires_grad_()
a_ =base_latent + transform_vector
if self.quantize:
a_ , *a_ =self.vqgan.quantize(lowerCAmelCase_)
else:
a_ =trans_latent
return self.vqgan.decode(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None) -> Tuple:
"""simple docstring"""
a_ =self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors="pt" , padding=lowerCAmelCase_)
a_ =self.clip(**lowerCAmelCase_)
a_ =clip_outputs.logits_per_image
if weights is not None:
a_ =similarity_logits * weights
return similarity_logits.sum()
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =self._get_clip_similarity(pos_prompts["prompts"] , lowerCAmelCase_ , weights=(1 / pos_prompts["weights"]))
if neg_prompts:
a_ =self._get_clip_similarity(neg_prompts["prompts"] , lowerCAmelCase_ , weights=neg_prompts["weights"])
else:
a_ =torch.tensor([1] , device=self.device)
a_ =-torch.log(lowerCAmelCase_) + torch.log(lowerCAmelCase_)
return loss
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device)
a_ =torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
a_ =self._add_vector(lowerCAmelCase_)
a_ =loop_post_process(lowerCAmelCase_)
a_ =self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
print("CLIP loss" , lowerCAmelCase_)
if self.log:
wandb.log({"CLIP Loss": clip_loss})
clip_loss.backward(retain_graph=lowerCAmelCase_)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
wandb.init(reinit=lowerCAmelCase_ , project="face-editor")
wandb.config.update({"Positive Prompts": positive_prompts})
wandb.config.update({"Negative Prompts": negative_prompts})
wandb.config.update({"lr": self.lr, "iterations": self.iterations})
if image_path:
a_ =Image.open(lowerCAmelCase_)
a_ =image.resize((2_5_6, 2_5_6))
wandb.log("Original Image" , wandb.Image(lowerCAmelCase_))
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
if not prompts:
return []
a_ =[]
a_ =[]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =[prompt.strip() for prompt in prompts.split("|")]
for prompt in prompts:
if isinstance(lowerCAmelCase_ , (tuple, list)):
a_ =prompt[0]
a_ =float(prompt[1])
elif ":" in prompt:
a_ , a_ =prompt.split(":")
a_ =float(lowerCAmelCase_)
else:
a_ =prompt
a_ =1.0
processed_prompts.append(lowerCAmelCase_)
weights.append(lowerCAmelCase_)
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_ , device=self.device),
}
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , ) -> str:
"""simple docstring"""
if image_path:
a_ =self._get_latent(lowerCAmelCase_)
else:
a_ =torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
assert pos_prompts, "You must provide at least one positive prompt."
a_ =self.process_prompts(lowerCAmelCase_)
a_ =self.process_prompts(lowerCAmelCase_)
if save_final and save_path is None:
a_ =os.path.join("./outputs/" , "_".join(pos_prompts["prompts"]))
if not os.path.exists(lowerCAmelCase_):
os.makedirs(lowerCAmelCase_)
else:
a_ =save_path + "_" + get_timestamp()
os.makedirs(lowerCAmelCase_)
a_ =save_path
a_ =self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("Original Image")
show_pil(custom_to_pil(lowerCAmelCase_))
a_ =loop_post_process(lowerCAmelCase_)
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)):
if show_intermediate:
show_pil(lowerCAmelCase_)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png"""))
if self.log:
wandb.log({"Image": wandb.Image(lowerCAmelCase_)})
if show_final:
show_pil(lowerCAmelCase_)
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png"""))
| code_codestyle: 41 |
(verbatim duplicate of the ALBERT configuration module shown earlier)
| style_context_codestyle: 41 | label: 1 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of two inputs: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exercise or_gate over all four input combinations."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| code_codestyle: 41 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: "Sequence[int] | None" = None) -> int:
    """Return the maximum sum over all non-empty subsequences of nums.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best so far, extend it with num, or start over at num.
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| style_context_codestyle: 41 | label: 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| code_codestyle: 41 |
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base,exponent pair whose base**exponent is greatest."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        # Compare exponent * log10(base) instead of computing huge powers directly.
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| style_context_codestyle: 41 | label: 1 |
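The input format implied by `line.split(",")` in the sample above is one `base,exponent` pair per line. A minimal sketch with made-up sample values; it assumes the data file sits next to the script, since `solution` resolves the path relative to `__file__`:

sample = "2,11\n3,7\n10,3\n"  # 2**11 = 2048, 3**7 = 2187, 10**3 = 1000
with open("base_exp.txt", "w") as f:
    f.write(sample)
print(solution("base_exp.txt"))  # -> 2, since 3**7 has the largest value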
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| code_codestyle: 41 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) with a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the unique x in [0, n1*n2) with x % n1 == r1 and x % n2 == r2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built on modular inverses instead.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| style_context_codestyle: 41 | label: 1 |
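A quick cross-check that the two implementations in the sample above agree on a small system: x ≡ 6 (mod 11) and x ≡ 4 (mod 7) has the unique solution 39 below 77.

assert chinese_remainder_theorem(11, 6, 7, 4) == 39
assert chinese_remainder_theorem2(11, 6, 7, 4) == 39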
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """An iterable dataset that yields increasing integers and stops at a random point."""

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should report the length it will actually iterate over.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =BatchSampler(range(2_4) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
a_ =BatchSampler(range(2_4) , batch_size=3 , drop_last=lowerCAmelCase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
a_ =BatchSampler(range(2_1) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
a_ =BatchSampler(range(2_1) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
a_ =BatchSampler(range(2_2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
a_ =BatchSampler(range(2_2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
a_ =BatchSampler(range(2_0) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
a_ =BatchSampler(range(2_0) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
# Check the shards when the dataset is very small.
a_ =BatchSampler(range(2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
a_ =BatchSampler(range(2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =BatchSampler(range(2_4) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_4) , batch_size=4 , drop_last=lowerCAmelCase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size.
a_ =BatchSampler(range(2_2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
a_ =BatchSampler(range(2_1) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_1) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
# Check the shards when the dataset is very small.
a_ =BatchSampler(range(2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =BatchSampler(range(2_4) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_4) , batch_size=3 , drop_last=lowerCAmelCase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
a_ =BatchSampler(range(2_1) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_1) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
a_ =BatchSampler(range(2_2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
a_ =BatchSampler(range(2_0) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_0) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is very small.
a_ =BatchSampler(range(2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2) , batch_size=3 , drop_last=lowerCAmelCase_)
a_ =[[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =BatchSampler(range(2_4) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_4) , batch_size=4 , drop_last=lowerCAmelCase_)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size.
a_ =BatchSampler(range(2_2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
a_ =BatchSampler(range(2_1) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2_1) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
# Check the shards when the dataset is very small.
a_ =BatchSampler(range(2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
a_ =BatchSampler(range(2) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =[[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =[[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
a_ =[BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , even_batches=lowerCAmelCase_) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]) , 3)
self.assertEqual(len(batch_sampler_shards[1]) , 2)
self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]])
self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 1_0, 1_1]])
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=False) -> int:
"""simple docstring"""
random.seed(lowerCAmelCase_)
a_ =list(lowerCAmelCase_)
a_ =[
IterableDatasetShard(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ , num_processes=lowerCAmelCase_ , process_index=lowerCAmelCase_ , split_batches=lowerCAmelCase_ , )
for i in range(lowerCAmelCase_)
]
a_ =[]
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase_)
iterable_dataset_lists.append(list(lowerCAmelCase_))
a_ =batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
a_ =iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
self.assertTrue(len(lowerCAmelCase_) % shard_batch_size == 0)
a_ =[]
for idx in range(0 , len(lowerCAmelCase_) , lowerCAmelCase_):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase_) < len(lowerCAmelCase_):
reference += reference
self.assertListEqual(lowerCAmelCase_ , reference[: len(lowerCAmelCase_)])
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =4_2
a_ =RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
# Edge case with a very small dataset
a_ =RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =BatchSampler(range(1_6) , batch_size=4 , drop_last=lowerCAmelCase_)
a_ =SkipBatchSampler(lowerCAmelCase_ , 2)
self.assertListEqual(list(lowerCAmelCase_) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]])
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =SkipDataLoader(list(range(1_6)) , batch_size=4 , skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]])
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =DataLoader(list(range(1_6)) , batch_size=4)
a_ =skip_first_batches(lowerCAmelCase_ , num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]])
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =DataLoaderShard(list(range(1_6)) , batch_size=4)
for idx, _ in enumerate(lowerCAmelCase_):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
def lowercase_ ( self) -> int:
"""simple docstring"""
Accelerator()
a_ =DataLoaderDispatcher(range(1_6) , batch_size=4)
for idx, _ in enumerate(lowerCAmelCase_):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_):
self.assertEqual(dataloader.end_of_dataloader , idx == 3)
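# Hedged illustration (added for clarity, not an actual test case): SkipBatchSampler
# simply drops the first `skip_batches` batches of the wrapped sampler, which is what
# lets `skip_first_batches` resume a training run mid-epoch, e.g.
#   list(SkipBatchSampler(BatchSampler(range(8), batch_size=2, drop_last=False), skip_batches=2))
#   -> [[4, 5], [6, 7]]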
| 41
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =v.conjugate().T
a_ =v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
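# Hedged demo (an added sketch, not part of the original module): for a Hermitian
# matrix, the Rayleigh quotient of any nonzero vector lies between the smallest and
# largest eigenvalues. The 2x2 matrix below is an illustrative assumption.
def rayleigh_bound_demo():
    demo_matrix = np.array([[2, 2 + 1j], [2 - 1j, 3]])
    demo_vector = np.array([[1], [1j]])
    quotient = (
        (demo_vector.conjugate().T @ demo_matrix @ demo_vector)
        / (demo_vector.conjugate().T @ demo_vector)
    ).item()
    eigenvalues = np.linalg.eigvalsh(demo_matrix)  # sorted ascending for Hermitian input
    assert eigenvalues[0] - 1e-9 <= quotient.real <= eigenvalues[-1] + 1e-9
# rayleigh_bound_demo()  # uncomment to run the check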
| 41
| 1
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowercase = logging.get_logger(__name__)
lowercase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
lowercase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
a_ =model_type_to_module_name(lowercase__ )
a_ =importlib.import_module(F""".{module_name}""" , "transformers.models" )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase__ , "__name__" , lowercase__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
a_ =importlib.import_module("transformers" )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def UpperCAmelCase_ ( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ):
'''simple docstring'''
a_ =get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(lowercase__ , encoding="utf-8" ) as reader:
return json.load(lowercase__ )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self) -> Tuple:
"""simple docstring"""
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase_)
def lowercase_ ( cls , lowerCAmelCase_ , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =kwargs.pop("config" , lowerCAmelCase_)
a_ =kwargs.pop("trust_remote_code" , lowerCAmelCase_)
a_ =True
a_ , a_ =ImageProcessingMixin.get_image_processor_dict(lowerCAmelCase_ , **lowerCAmelCase_)
a_ =config_dict.get("image_processor_type" , lowerCAmelCase_)
a_ =None
if "AutoImageProcessor" in config_dict.get("auto_map" , {}):
a_ =config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
a_ =config_dict.pop("feature_extractor_type" , lowerCAmelCase_)
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration.")
a_ =feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor")
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {}):
a_ =config_dict["auto_map"]["AutoFeatureExtractor"]
a_ =feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor")
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration.")
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =AutoConfig.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_)
            # It could be in `config.image_processor_type`
a_ =getattr(lowerCAmelCase_ , "image_processor_type" , lowerCAmelCase_)
if hasattr(lowerCAmelCase_ , "auto_map") and "AutoImageProcessor" in config.auto_map:
a_ =config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
a_ =image_processor_class_from_name(lowerCAmelCase_)
a_ =image_processor_auto_map is not None
a_ =image_processor_class is not None or type(lowerCAmelCase_) in IMAGE_PROCESSOR_MAPPING
a_ =resolve_trust_remote_code(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if has_remote_code and trust_remote_code:
a_ =get_class_from_dynamic_module(
lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_)
a_ =kwargs.pop("code_revision" , lowerCAmelCase_)
if os.path.isdir(lowerCAmelCase_):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCAmelCase_) in IMAGE_PROCESSOR_MAPPING:
a_ =IMAGE_PROCESSOR_MAPPING[type(lowerCAmelCase_)]
return image_processor_class.from_dict(lowerCAmelCase_ , **lowerCAmelCase_)
raise ValueError(
f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}""")
@staticmethod
def lowercase_ ( lowerCAmelCase_ , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(lowerCAmelCase_ , lowerCAmelCase_)
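# Hedged usage sketch (added for illustration; resolving a checkpoint needs network
# access to the Hub): the mapping above is what lets the auto class pick a concrete
# processor, e.g.
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   type(processor).__name__  # -> "ViTImageProcessor", per the "vit" entry above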
| 41
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , len(lowercase__ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if row >= len(lowercase__ ):
solution.append(lowercase__ )
printboard(lowercase__ )
print()
return True
for i in range(len(lowercase__ ) ):
if is_safe(lowercase__ , lowercase__ , lowercase__ ):
a_ =1
solve(lowercase__ , row + 1 )
a_ =0
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in range(len(lowercase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is :''', len(solution))
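# Hedged sketch (an added illustration, not part of the original script): the same
# backtracking idea as `solve`/`is_safe` above, rewritten as a counter that tracks
# attacked columns and diagonals in sets so each safety check is O(1) instead of O(n).
def count_n_queens(size: int) -> int:
    cols: set[int] = set()
    diaga: set[int] = set()  # row + col is constant along one diagonal family
    diagb: set[int] = set()  # row - col is constant along the other family

    def place(row: int) -> int:
        if row == size:
            return 1
        total = 0
        for col in range(size):
            if col in cols or row + col in diaga or row - col in diagb:
                continue
            cols.add(col)
            diaga.add(row + col)
            diagb.add(row - col)
            total += place(row + 1)
            cols.remove(col)
            diaga.remove(row + col)
            diagb.remove(row - col)
        return total

    return place(0)

assert count_n_queens(4) == 2  # the two mirrored solutions of the 4-queens puzzle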
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCAmelCase_ ( lowercase__ = 2_0_0_0_0_0_0 ):
'''simple docstring'''
a_ =[0]
a_ =42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
a_ =0
# the area corresponding to the grid that gives the product closest to target
a_ =0
# an estimate of b, using the quadratic formula
a_ =42
# the largest integer less than b_estimate
a_ =42
    # the smallest integer greater than b_estimate
a_ =42
# the triangle number corresponding to b_floor
a_ =42
# the triangle number corresponding to b_ceil
a_ =42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
a_ =(-1 + sqrt(1 + 8 * target / triangle_a )) / 2
a_ =floor(lowercase__ )
a_ =ceil(lowercase__ )
a_ =triangle_numbers[b_floor]
a_ =triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
a_ =triangle_b_first_guess * triangle_a
a_ =idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
a_ =triangle_b_second_guess * triangle_a
a_ =idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
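# Hedged alternative (added for illustration; assumes the `transformers` pipeline API):
# the same top-k mask filling is available through the built-in fill-mask pipeline,
# which handles tokenization, softmax, and detokenization internally:
#   from transformers import pipeline
#   camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
#   camembert_fill_mask("Le camembert est <mask> :)", top_k=3)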
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =os.path.dirname(os.path.realpath(lowercase__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
        for word in [sum(ord(x) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
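# Hedged worked example (from the Project Euler 42 statement, added for illustration):
# the word value of "SKY" is 19 + 11 + 25 = 55, and 55 = T(10) = 10 * 11 / 2, so "SKY"
# is a triangle word and would be counted by the solution above.
assert sum(ord(letter) - 6_4 for letter in "SKY") == 5_5 == 1_0 * 1_1 // 2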
| 41
| 1
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =Sql(
cache_dir=lowerCAmelCase_ , features=lowerCAmelCase_ , sql=lowerCAmelCase_ , con=lowerCAmelCase_ , **lowerCAmelCase_ , )
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =None
a_ =None
a_ =None
a_ =None
self.builder.download_and_prepare(
download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , )
# Build dataset for splits
a_ =self.builder.as_dataset(
split="train" , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory)
return dataset
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
a_ =dataset
a_ =name
a_ =con
a_ =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ =num_proc
a_ =to_sql_kwargs
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.to_sql_kwargs.pop("sql" , lowerCAmelCase_)
a_ =self.to_sql_kwargs.pop("con" , lowerCAmelCase_)
a_ =self.to_sql_kwargs.pop("index" , lowerCAmelCase_)
a_ =self._write(index=lowerCAmelCase_ , **self.to_sql_kwargs)
return written
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ , a_ , a_ =args
a_ ={**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
a_ =query_table(
table=self.dataset.data , key=slice(lowerCAmelCase_ , offset + self.batch_size) , indices=self.dataset._indices , )
a_ =batch.to_pandas()
a_ =df.to_sql(self.name , self.con , index=lowerCAmelCase_ , **lowerCAmelCase_)
return num_rows or len(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
a_ , a_ =len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowerCAmelCase_ , lowerCAmelCase_)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
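# Hedged usage sketch (added for illustration; file and table names are assumptions):
# this writer backs `Dataset.to_sql`, which batches the Arrow table into pandas frames
# and appends them with `DataFrame.to_sql`:
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   with sqlite3.connect("demo.db") as con:
#       ds.to_sql("demo_table", con)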
| 41
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 1
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowercase = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 1_0:
a_ =key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
a_ =key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
a_ =key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
a_ =key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
a_ =key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
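# Hedged micro-examples of the renaming above (added for clarity; key names illustrative):
#   replace_key("vqvae.level_blocks.0.k")  -> "vqvae.level_blocks.0.codebook"
#   replace_key("prior.x_out.weight")      -> "prior.fc_proj_out.weight"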
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ ={}
import re
a_ =re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
a_ =re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
a_ =re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowercase__ ):
a_ =re_encoder_block_conv_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] )
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
a_ =re_encoder_block_conv_in.sub(lowercase__ , lowercase__ )
elif re_encoder_block_resnet.fullmatch(lowercase__ ):
a_ =re_encoder_block_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] )
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_encoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_encoder_block_proj_out.fullmatch(lowercase__ ):
a_ =re_encoder_block_proj_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
a_ =re_encoder_block_proj_out.sub(lowercase__ , lowercase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowercase__ ):
a_ =re_decoder_block_conv_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
a_ =re_decoder_block_conv_out.sub(lowercase__ , lowercase__ )
elif re_decoder_block_resnet.fullmatch(lowercase__ ):
a_ =re_decoder_block_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_decoder_block_resnet.sub(lowercase__ , lowercase__ )
elif re_decoder_block_proj_in.fullmatch(lowercase__ ):
a_ =re_decoder_block_proj_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
a_ =re_decoder_block_proj_in.sub(lowercase__ , lowercase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowercase__ ):
a_ =re_prior_cond_conv_out.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ =F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
a_ =re_prior_cond_conv_out.sub(lowercase__ , lowercase__ )
elif re_prior_cond_resnet.fullmatch(lowercase__ ):
a_ =re_prior_cond_resnet.match(lowercase__ )
a_ =regex_match.groups()
a_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ ={"1": 1, "3": 2}[groups[-2]]
a_ =F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
a_ =F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
a_ =prefix + resnet_block
a_ =re_prior_cond_resnet.sub(lowercase__ , lowercase__ )
elif re_prior_cond_proj_in.fullmatch(lowercase__ ):
a_ =re_prior_cond_proj_in.match(lowercase__ )
a_ =regex_match.groups()
a_ =F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
a_ =re_prior_cond_proj_in.sub(lowercase__ , lowercase__ )
# keep original key
else:
a_ =original_key
a_ =replace_key(lowercase__ )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
a_ =model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
a_ =original_key
a_ =original_key
a_ =value
return new_dict
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__=None , lowercase__=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
a_ =requests.get(F"""{PREFIX}{file}""" , allow_redirects=lowercase__ )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=lowercase__ )
open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , "wb" ).write(r.content )
a_ =MODEL_MAPPING[model_name.split("/" )[-1]]
a_ =JukeboxConfig.from_pretrained(lowercase__ )
a_ =JukeboxModel(lowercase__ )
a_ =[]
a_ ={}
for i, dict_name in enumerate(lowercase__ ):
a_ =torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )["model"]
a_ ={}
for k in old_dic.keys():
if k.endswith(".b" ):
a_ =old_dic[k]
elif k.endswith(".w" ):
a_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a_ =old_dic[k]
else:
a_ =old_dic[k]
a_ ="vqvae" if i == 0 else F"""priors.{3 - i}"""
a_ =fix_jukebox_keys(lowercase__ , model.state_dict() , lowercase__ , lowercase__ )
weight_dict.append(lowercase__ )
a_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(lowercase__ )
for i in range(len(lowercase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
json.dump(lowercase__ , lowercase__ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
return weight_dict
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowercase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 41
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(lowercase__ ) == 9 and set(lowercase__ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 1
|
'''simple docstring'''
import heapq
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[]
    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min priority queue, so -1 * len(v) is used to simulate a max priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase__ , [-1 * len(lowercase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
a_ =set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
a_ =heapq.heappop(lowercase__ )[1][0]
chosen_vertices.add(lowercase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
a_ =elem[1][1].index(lowercase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 41
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
def lowercase_ ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Dict:
"""simple docstring"""
a_ =4
a_ =3_2
a_ =(3_2, 3_2)
a_ =torch.manual_seed(0)
a_ =torch.device(lowerCAmelCase_)
a_ =(batch_size, num_channels) + sizes
a_ =randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
a_ ={"hidden_states": hidden_states}
if include_temb:
a_ =1_2_8
a_ =randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
if include_res_hidden_states_tuple:
a_ =torch.manual_seed(1)
a_ =(randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_),)
if include_encoder_hidden_states:
a_ =floats_tensor((batch_size, 3_2, 3_2)).to(lowerCAmelCase_)
if include_skip_sample:
a_ =randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_)
return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ ={
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
a_ =3_2
if self.block_type == "mid":
init_dict.pop("out_channels")
a_ =self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
unet_block.to(lowerCAmelCase_)
unet_block.eval()
with torch.no_grad():
a_ =unet_block(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
self.assertEqual(output.shape , self.output_shape)
a_ =output[0, -1, -3:, -3:]
a_ =torch.tensor(lowerCAmelCase_).to(lowerCAmelCase_)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
| 41
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 41
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[float("inf" )] * vertex_count
a_ =0.0
for _ in range(vertex_count - 1 ):
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
a_ =distance[u] + w
a_ =check_negative_cycle(lowercase__ , lowercase__ , lowercase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
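# Hedged non-interactive example (assumed values mirroring the prompts above):
# Bellman-Ford relaxes every edge V - 1 times, so it runs in O(V * E) and, unlike
# Dijkstra, tolerates negative edge weights when no negative cycle is reachable.
# For V = 3, E = 3, source = 0 with edges
#   {"src": 0, "dst": 1, "weight": 4}, {"src": 0, "dst": 2, "weight": 5},
#   {"src": 1, "dst": 2, "weight": -2}
# the expected distances from vertex 0 are [0.0, 4.0, 2.0].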
| 41
| 1
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
lowercase = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =list(state_dict.keys() )
for name in state_dict_keys:
a_ =state_dict.pop(lowercase__ )
# emb -> embedding
if name.startswith("emb." ):
a_ =name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
a_ =name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
a_ =re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , lowercase__ )
# ffn -> feed_forward
a_ =re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , lowercase__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
a_ =name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
a_ =name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
a_ =name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
a_ ="rwkv." + name
a_ =weight
return state_dict
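# Hedged micro-example of the renaming above (added for clarity; tensor values elided):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"
#   "head.weight"             -> "head.weight" (the head is the one key left unprefixed)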
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ):
'''simple docstring'''
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
a_ =5_0_2_7_7
a_ =AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
a_ =PreTrainedTokenizerFast(tokenizer_file=lowercase__ )
a_ =len(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
# 2. Build the config
a_ =list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
a_ =candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
a_ =RwkvConfig(
vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowercase__ )
# 3. Download model file then convert state_dict
a_ =hf_hub_download(lowercase__ , lowercase__ )
a_ =torch.load(lowercase__ , map_location="cpu" )
a_ =convert_state_dict(lowercase__ )
# 4. Split in shards and save
a_ , a_ =shard_checkpoint(lowercase__ )
for shard_file, shard in shards.items():
torch.save(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
if index is not None:
a_ =os.path.join(lowercase__ , lowercase__ )
# Save the index as well
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
a_ =json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + "\n"
f.write(lowercase__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model." )
a_ =list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
a_ =torch.load(os.path.join(lowercase__ , lowercase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , lowercase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
a_ =AutoModelForCausalLM.from_pretrained(lowercase__ )
model.push_to_hub(lowercase__ , max_shard_size="2GB" )
tokenizer.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowercase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 41
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase = '''path-to-your-trained-model'''
lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowercase = '''A photo of sks dog in a bucket'''
lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
lowercase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =Github(os.environ["GITHUB_TOKEN"] )
a_ =g.get_repo("huggingface/diffusers" )
a_ =repo.get_issues(state="open" )
for issue in open_issues:
a_ =sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
a_ =comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
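# A pure restatement of the day thresholds above (an assumption: it mirrors
# only the date conditions, not the label / last-commenter checks or the
# GitHub side effects), handy for unit-testing the logic without the API.
def _toy_stale_action(created_at, updated_at, now):
    age = (now - created_at).days
    inactive = (now - updated_at).days
    if age >= 30 and inactive > 23:
        return "post-stale-notification"
    if age >= 30 and inactive > 7:
        return "close-if-stalebot-commented-last"
    return "no-op"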
if __name__ == "__main__":
UpperCAmelCase_()
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowercase__ ):
print(F"""{i}\t\t{d}""" )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[float("inf" )] * vertex_count
a_ =0.0
for _ in range(vertex_count - 1 ):
for j in range(lowercase__ ):
a_ , a_ , a_ =(graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
a_ =distance[u] + w
a_ =check_negative_cycle(lowercase__ , lowercase__ , lowercase__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
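# Worked example (assuming the edge-dict format read in the __main__ block
# below): with edges 0->1 (weight 5) and 1->2 (weight -2) and source 0, the
# relaxation passes produce distances [0.0, 5.0, 3.0] for vertices 0, 1, 2.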
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 41
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =json.loads(f.read() )
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
a_ =collections.OrderedDict()
with open(lowercase__ , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ =b
a_ =idx
for wd in b:
a_ =idx
return vocab, raw_vocab, ids_to_tokens, emoji
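# Note on the vocab format parsed above (inferred from the code; the exact
# file layout is an assumption): each line holds one id, and a line may list
# several comma-separated surface forms sharing that id, e.g. "ねこ,ネコ,猫";
# a line consisting of a bare "," is kept as the literal comma token.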
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(w) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
"""simple docstring"""
a_ =text.replace(" " , "<SP>")
a_ =text.replace(" " , "<SP>")
a_ =text.replace("\r\n" , "<BR>")
a_ =text.replace("\n" , "<BR>")
a_ =text.replace("\r" , "<BR>")
a_ =text.replace("\t" , "<TAB>")
a_ =text.replace("—" , "ー")
a_ =text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
if clean:
a_ =self.clean_text(lowerCAmelCase_)
def check_simbol(x):
e =x.encode()
if len(x) == 1 and len(e) == 2:
a_ =(int(e[0]) << 8) + int(e[1])
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(x):
e =x.encode()
if len(x) == 1 and len(e) == 3:
a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
a_ =0
a_ =[]
while pos < len(lowerCAmelCase_):
a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ =[] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
a_ =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_) > 2:
a_ =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase_) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda x: x[0])[0]
result.append(lowerCAmelCase_)
a_ =e
else:
a_ =pos + 1
a_ =text[pos:end]
if check_simbol(lowerCAmelCase_):
result.append("<KIGOU>")
elif checkuae(lowerCAmelCase_):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ =end
return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
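# Byte-fallback decoding illustrated: the token sequence
# <|byte227|><|byte129|><|byte129|> collects [227, 129, 129] into a
# bytearray and UTF-8 decodes it to "ぁ" (bytes e3 81 81).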
| 41
| 1
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase = TypeVar('''T''')
class UpperCAmelCase ( Generic[T]):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ = True) -> None:
"""simple docstring"""
a_ ={} # dictionary of lists
a_ =directed
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_)
self.adj_list[destination_vertex].append(lowerCAmelCase_)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_)
a_ =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase_)
a_ =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
a_ =[destination_vertex]
a_ =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_)
a_ =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a_ =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
a_ =[destination_vertex]
a_ =[]
return self
def __repr__( self) -> str:
"""simple docstring"""
return pformat(self.adj_list)
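# Illustrative usage (the class name GraphAdjacencyList is taken from the
# return annotation of the add-edge method above; the definition itself is
# mangled in this dump):
#   graph = GraphAdjacencyList[int](directed=False)
#   graph.add_edge(0, 1)
#   graph.add_edge(1, 2)
#   print(graph)   # {0: [1], 1: [0, 2], 2: [1]}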
| 41
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(k ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(i ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
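# Why the permutes above: TF stores conv kernels as (H, W, in, out) and
# depthwise kernels as (H, W, in, multiplier), while PyTorch expects
# (out, in, H, W). For example a (3, 3, 16, 32) TF kernel becomes a
# (32, 16, 3, 3) tensor after .permute(3, 2, 0, 1).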
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 1
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCAmelCase_ ( lowercase__ ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase_ ( ):
'''simple docstring'''
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
a_ =[1, 2, 3]
with pytest.raises(lowercase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase__ , lowercase__ , num_proc=2 )
with pytest.raises(lowercase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase__ , lowercase__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[1, 2]
a_ ={"a": 1, "b": 2}
a_ ={"a": [1, 2], "b": [3, 4]}
a_ ={"a": {"1": 1}, "b": 2}
a_ ={"a": 1, "b": 2, "c": 3, "d": 4}
a_ =[2, 3]
a_ ={"a": 2, "b": 3}
a_ ={"a": [2, 3], "b": [4, 5]}
a_ ={"a": {"1": 2}, "b": 3}
a_ ={"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
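# Note: map_nested applies the function leaf-wise while preserving the
# container shape (as the expected values above show), and with num_proc
# set it dispatches the work through the active joblib backend ("spark"
# inside this context manager).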
| 41
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase = logging.getLogger(__name__)
lowercase = '''Hello world! cécé herlolip'''
lowercase = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =BertAbsConfig(
temp_dir="." , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
a_ =torch.load(lowercase__ , lambda storage, loc: storage )
a_ =AbsSummarizer(lowercase__ , torch.device("cpu" ) , lowercase__ )
original.eval()
a_ =BertAbsSummarizer(lowercase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
a_ =BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
a_ =tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowercase__ )) )
a_ =torch.tensor(lowercase__ ).unsqueeze(0 )
a_ =tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(lowercase__ )) )
a_ =torch.tensor(lowercase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
a_ =encoder_input_ids
a_ =decoder_input_ids
a_ =a_ =None
a_ =None
a_ =a_ =None
a_ =a_ =None
a_ =None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
a_ =original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
a_ =original.generator(lowercase__ )
a_ =new_model(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
a_ =new_model.generator(lowercase__ )
a_ =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowercase__ ) )
a_ =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowercase__ ) )
a_ =torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowercase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 41
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ , a_ =0, 1
while True:
a_ , a_ =b, a + b
yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
'''simple docstring'''
a_ =1
a_ =fibonacci_generator()
while len(str(next(lowercase__ ) ) ) < n:
answer += 1
return answer + 1
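# Worked example: counting F1 = F2 = 1, the first Fibonacci term with three
# digits is 144, the 12th term, so solution(3) returns 12 (the loop counts
# the shorter terms and the final +1 accounts for 144 itself).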
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "switch_transformers"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , lowerCAmelCase_=3_2_1_2_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=False , lowerCAmelCase_=0.0_1 , lowerCAmelCase_="float32" , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1e-6 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=1.0 , lowerCAmelCase_="relu" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =d_kv
a_ =d_ff
a_ =num_sparse_encoder_layers
a_ =num_layers
a_ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ =num_sparse_decoder_layers
# This tells us how many encoder layers apart each sparse layer sits.
if self.num_sparse_encoder_layers > 0:
a_ =self.num_layers // self.num_sparse_encoder_layers
else:
a_ =self.num_layers # HACK: this will create 0 sparse layers
# This tells us how many decoder layers apart each sparse layer sits.
if self.num_sparse_decoder_layers > 0:
a_ =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ =self.num_decoder_layers # HACK: this will create 0 sparse layers
a_ =num_heads
a_ =num_experts
a_ =expert_capacity
a_ =router_bias
a_ =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
a_ =router_dtype
a_ =router_ignore_padding_tokens
a_ =relative_attention_num_buckets
a_ =relative_attention_max_distance
a_ =dropout_rate
a_ =layer_norm_epsilon
a_ =initializer_factor
a_ =feed_forward_proj
a_ =use_cache
a_ =add_router_probs
a_ =router_z_loss_coef
a_ =router_aux_loss_coef
a_ =self.feed_forward_proj.split("-")
a_ =act_info[-1]
a_ =act_info[0] == "gated"
if len(lowerCAmelCase_) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ ="gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
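# Example of the sparse-layer spacing computed above: with num_layers=12 and
# num_sparse_encoder_layers=3, encoder_sparse_step == 4, i.e. every fourth
# encoder block hosts a mixture-of-experts layer.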
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ ={}
a_ =os.path.join(lowercase__ , "all_results.json" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , "r" ) as f:
a_ =json.load(lowercase__ )
else:
raise ValueError(F"""can't find {path}""" )
return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
a_ =self.get_auto_remove_tmp_dir()
a_ =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowercase = logging.get_logger(__name__)
@add_end_docstrings(__a)
class UpperCAmelCase ( __a):
'''simple docstring'''
def __init__( self , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(lowerCAmelCase_)
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> str:
"""simple docstring"""
if "text_queries" in kwargs:
a_ =kwargs.pop("text_queries")
if isinstance(lowerCAmelCase_ , (str, Image.Image)):
a_ ={"image": image, "candidate_labels": candidate_labels}
else:
a_ =image
a_ =super().__call__(lowerCAmelCase_ , **lowerCAmelCase_)
return results
def lowercase_ ( self , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ ={}
if "threshold" in kwargs:
a_ =kwargs["threshold"]
if "top_k" in kwargs:
a_ =kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ =load_image(inputs["image"])
a_ =inputs["candidate_labels"]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =candidate_labels.split(",")
a_ =torch.tensor([[image.height, image.width]] , dtype=torch.int32)
for i, candidate_label in enumerate(lowerCAmelCase_):
a_ =self.tokenizer(lowerCAmelCase_ , return_tensors=self.framework)
a_ =self.image_processor(lowerCAmelCase_ , return_tensors=self.framework)
yield {
"is_last": i == len(lowerCAmelCase_) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase_ ( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =model_inputs.pop("target_size")
a_ =model_inputs.pop("candidate_label")
a_ =model_inputs.pop("is_last")
a_ =self.model(**lowerCAmelCase_)
a_ ={"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0.1 , lowerCAmelCase_=None) -> str:
"""simple docstring"""
a_ =[]
for model_output in model_outputs:
a_ =model_output["candidate_label"]
a_ =BaseModelOutput(lowerCAmelCase_)
a_ =self.image_processor.post_process_object_detection(
outputs=lowerCAmelCase_ , threshold=lowerCAmelCase_ , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
a_ =outputs["scores"][index].item()
a_ =self._get_bounding_box(outputs["boxes"][index][0])
a_ ={"score": score, "label": label, "box": box}
results.append(lowerCAmelCase_)
a_ =sorted(lowerCAmelCase_ , key=lambda x: x["score"] , reverse=True)
if top_k:
a_ =results[:top_k]
return results
def lowercase_ ( self , lowerCAmelCase_) -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
a_ , a_ , a_ , a_ =box.int().tolist()
a_ ={
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
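# Typical usage (illustrative; the checkpoint id is an example, not part of
# this module):
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cat.png", candidate_labels=["cat", "remote control"])
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]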
| 41
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class UpperCAmelCase ( __a):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
a_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
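# The mapping above declares which input axes are dynamic in the exported
# ONNX graph: e.g. for the default task it expands to
# {"input_ids": {0: "batch", 1: "sequence"}, ...}, so batch size and
# sequence length stay free at inference time.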
| 41
| 1
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =["a", "b", "c"]
# Defaults to last layer if both are None
a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , ["c"])
self.assertEqual(lowerCAmelCase_ , [2])
# Out indices set to match out features
a_ , a_ =get_aligned_output_features_output_indices(["a", "c"] , lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , ["a", "c"])
self.assertEqual(lowerCAmelCase_ , [0, 2])
# Out features set to match out indices
a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , [0, 2] , lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , ["a", "c"])
self.assertEqual(lowerCAmelCase_ , [0, 2])
# Out features selected from negative indices
a_ , a_ =get_aligned_output_features_output_indices(lowerCAmelCase_ , [-3, -1] , lowerCAmelCase_)
self.assertEqual(lowerCAmelCase_ , ["a", "c"])
self.assertEqual(lowerCAmelCase_ , [-3, -1])
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(["a", "b"] , (0, 1) , lowerCAmelCase_)
# Out features must be a list
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"])
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"])
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(lowerCAmelCase_ , 0 , ["a", "b"])
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(lowerCAmelCase_ , (0, 1) , ["a"])
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"])
# Out features should match out indices
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"])
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase_):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"])
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"])
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =BackboneMixin()
a_ =["a", "b", "c"]
a_ =["a", "c"]
a_ =[0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
a_ =["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"])
self.assertEqual(backbone.out_indices , [0, 1])
a_ =[-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"])
self.assertEqual(backbone.out_indices , [-3, -1])
| 41
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(lowercase__ , ans + num , lowercase__ )
return ans
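# Worked example (Kadane's algorithm): for [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# the running sum restarts at 4 and grows to 4 - 1 + 2 + 1 = 6, which is
# the maximum subarray sum returned.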
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 1
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
'''simple docstring'''
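    # Project Euler 99: compare base**exponent pairs via exponent * log10(base)
    # so the huge powers are never evaluated; the answer is a 1-indexed line number.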
a_ =0
a_ =0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , lowercase__ ) ) ):
a_ , a_ =list(map(lowercase__ , line.split("," ) ) )
if x * logaa(lowercase__ ) > largest:
a_ =x * logaa(lowercase__ )
a_ =i + 1
return result
if __name__ == "__main__":
print(solution())
| 41
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__magic_name__ : Optional[str] = field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    __magic_name__ : bool = field(default=__a , metadata={"help": "Whether to freeze the encoder."})
__magic_name__ : bool = field(default=__a , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__magic_name__ : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__ : Optional[int] = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__ : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__magic_name__ : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__magic_name__ : Optional[str] = field(default=__a , metadata={"help": "Source language id for translation."})
__magic_name__ : Optional[str] = field(default=__a , metadata={"help": "Target language id for translation."})
__magic_name__ : Optional[int] = field(default=__a , metadata={"help": "# num_beams to use for evaluation."})
__magic_name__ : bool = field(
default=__a , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase__ , os.path.join(lowercase__ , F"""{split}_results.json""" ) )
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ , a_ , a_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ , a_ , a_ =parser.parse_args_into_dataclasses()
check_output_dir(lowercase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(lowercase__ , lowercase__ , lowercase__ ):
assert hasattr(lowercase__ , lowercase__ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase__ , lowercase__ , getattr(lowercase__ , lowercase__ ) )
a_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a_ =AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=lowercase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
a_ =model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase__ , lowercase__ ):
a_ =tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
a_ =tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
a_ =SeqaSeqDataset
# Get datasets
a_ =(
dataset_class(
lowercase__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
a_ =(
dataset_class(
lowercase__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
a_ =(
dataset_class(
lowercase__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
a_ =(
build_compute_metrics_fn(data_args.task , lowercase__ ) if training_args.predict_with_generate else None
)
a_ =SeqaSeqTrainer(
model=lowercase__ , args=lowercase__ , data_args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , data_collator=SeqaSeqDataCollator(
lowercase__ , lowercase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase__ , tokenizer=lowercase__ , )
a_ ={}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
a_ =trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
a_ =train_result.metrics
a_ =data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a_ =trainer.evaluate(metric_key_prefix="val" )
a_ =data_args.n_val
a_ =round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
a_ =trainer.predict(test_dataset=lowercase__ , metric_key_prefix="test" )
a_ =test_output.metrics
a_ =data_args.n_test
if trainer.is_world_process_zero():
a_ =round(metrics["test_loss"] , 4 )
handle_metrics("test" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.predict_with_generate:
a_ =tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
a_ =lmap(str.strip , lowercase__ )
write_txt_file(lowercase__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(lowercase__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 41
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
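    # Extended Euclid: returns (x, y) such that a*x + b*y == gcd(a, b).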
if b == 0:
return (1, 0)
((a_) , (a_)) =extended_euclid(lowercase__ , a % b )
a_ =a // b
return (y, x - k * y)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
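    # Chinese Remainder Theorem: combine n % n1 == r1 and n % n2 == r2 for
    # coprime n1, n2 into a single residue modulo n1 * n2 via Bezout coefficients.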
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
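    # Modular inverse of a modulo n via extended Euclid; assumes gcd(a, n) == 1.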
((a_) , (a_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
a_ =(b % n + n) % n
return b
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
a_ =na * na
a_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowercase = 50_000
lowercase = 5_000
lowercase , lowercase = os.path.split(__file__)
lowercase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(lowercase__ ):
a_ =dataset[i]
@get_duration
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(0 , len(lowercase__ ) , lowercase__ ):
a_ =dataset[i : i + batch_size]
@get_duration
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
with dataset.formatted_as(type=lowercase__ ):
for i in range(lowercase__ ):
a_ =dataset[i]
@get_duration
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
with dataset.formatted_as(type=lowercase__ ):
for i in range(0 , lowercase__ , lowercase__ ):
a_ =dataset[i : i + batch_size]
def UpperCAmelCase_ ( ):
'''simple docstring'''
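    # Benchmark plan: time sequential row access, batched access and formatted
    # access (numpy/pandas/torch/tensorflow), before and after shuffling.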
a_ ={"num examples": SPEED_TEST_N_EXAMPLES}
a_ =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0_0}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0_0_0}),
]
a_ =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0_0}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0_0_0}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
a_ =datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
a_ =generate_example_dataset(
os.path.join(lowercase__ , "dataset.arrow" ) , lowercase__ , num_examples=lowercase__ , seq_shapes={"list": (1_0_0,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowercase__ ) )
a_ =func(lowercase__ , **lowercase__ )
print("shuffling dataset" )
a_ =dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowercase__ ) )
a_ =func(
lowercase__ , **lowercase__ )
with open(lowercase__ , "wb" ) as f:
f.write(json.dumps(lowercase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 41
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
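    # A matrix is Hermitian when it equals its own conjugate transpose.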
    return np.array_equal(lowercase__ , lowercase__.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
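    # Rayleigh quotient R(M, v) = (v* M v) / (v* v); real-valued when M is Hermitian.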
a_ =v.conjugate().T
a_ =v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 41
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
a_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
a_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
a_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
a_ =sd.pop(lowercase__ )
a_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
a_ =sd[key]
            # We split the fused QKV weight into separate Q, K, V projections
a_ =key.replace(".qkv_proj." , ".q_proj." )
a_ =key.replace(".qkv_proj." , ".k_proj." )
a_ =key.replace(".qkv_proj." , ".v_proj." )
a_ =value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
a_ , a_ , a_ =torch.split(lowercase__ , depth // 3 , dim=0 )
a_ =q
a_ =k
a_ =v
del sd[key]
return sd
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
a_ =load_checkpoint(lowercase__ )
if config is not None:
a_ =OPTConfig.from_pretrained(lowercase__ )
else:
a_ =OPTConfig()
a_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowercase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 41
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
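    # A square is safe when no queen already shares its row, its column or
    # either upper diagonal (rows below the current one are still empty).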
for i in range(len(lowercase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowercase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowercase__ , -1 , -1 ) , range(lowercase__ , len(lowercase__ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
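    # Depth-first backtracking: place one queen per row, recurse on the next
    # row, then undo the placement before trying the next column.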
if row >= len(lowercase__ ):
solution.append(lowercase__ )
printboard(lowercase__ )
print()
return True
for i in range(len(lowercase__ ) ):
if is_safe(lowercase__ , lowercase__ , lowercase__ ):
a_ =1
solve(lowercase__ , row + 1 )
a_ =0
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in range(len(lowercase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is :''', len(solution))
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
lowercase = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
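    # Encode the masked sentence, read the logits at the <mask> position and
    # return the top-k candidate completions with their probabilities.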
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 1
|
'''simple docstring'''
lowercase = 8.3_144_598
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
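    # Root-mean-square speed of an ideal gas molecule: v_rms = sqrt(3RT / M),
    # with R in J/(mol*K), T in kelvin and M in kg/mol.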
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowercase = 300
    lowercase = 0.028 # molar mass of N2 in kg/mol (R above is in SI units)
lowercase = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = "yolos"
def __init__( self , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3_0_7_2 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=[5_1_2, 8_6_4] , lowerCAmelCase_=1_6 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=1_0_0 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , **lowerCAmelCase_ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_attention_heads
a_ =intermediate_size
a_ =hidden_act
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =initializer_range
a_ =layer_norm_eps
a_ =image_size
a_ =patch_size
a_ =num_channels
a_ =qkv_bias
a_ =num_detection_tokens
a_ =use_mid_position_embeddings
a_ =auxiliary_loss
# Hungarian matcher
a_ =class_cost
a_ =bbox_cost
a_ =giou_cost
# Loss coefficients
a_ =bbox_loss_coefficient
a_ =giou_loss_coefficient
a_ =eos_coefficient
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Tuple = version.parse("1.11")
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self) -> float:
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return 1_2
| 41
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
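    # Project Euler 42: count the words whose letter-value sum (A=1 ... Z=26)
    # is a triangular number t_n = n * (n + 1) / 2.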
    a_ =os.path.dirname(os.path.realpath(__file__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
lowercase = 1.6_0_2_1e-1_9 # units = C
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
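    # Solve sigma = q * n * mu for whichever of conductivity (sigma), electron
    # concentration (n) or mobility (mu) was passed in as zero.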
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(lowercase__ , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
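    # Map a suno/bark checkpoint onto the matching HF Bark submodel: pick the
    # model/config/generation-config classes by type, then rename layers and
    # split any fused weights.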
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
a_ =k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
a_ =state_dict.pop(lowercase__ )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowercase__ , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : List[Any] = (PNDMScheduler,)
__magic_name__ : Any = (("num_inference_steps", 50),)
def lowercase_ ( self , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ ={
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase_)
return config
def lowercase_ ( self , lowerCAmelCase_=0 , **lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config(**lowerCAmelCase_)
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals
a_ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase_)
a_ =scheduler_class.from_pretrained(lowerCAmelCase_)
new_scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self) -> Dict:
"""simple docstring"""
pass
def lowercase_ ( self , lowerCAmelCase_=0 , **lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residuals (must be after setting timesteps)
a_ =dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase_)
a_ =scheduler_class.from_pretrained(lowerCAmelCase_)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase_)
# copy over dummy past residual (must be after setting timesteps)
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =new_scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase_ ( self , **lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config(**lowerCAmelCase_)
a_ =scheduler_class(**lowerCAmelCase_)
a_ =1_0
a_ =self.dummy_model()
a_ =self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase_)
for i, t in enumerate(scheduler.prk_timesteps):
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ =model(lowerCAmelCase_ , lowerCAmelCase_)
a_ =scheduler.step_plms(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
return sample
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =dict(self.forward_default_kwargs)
a_ =kwargs.pop("num_inference_steps" , lowerCAmelCase_)
for scheduler_class in self.scheduler_classes:
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
a_ =self.dummy_sample
a_ =0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase_ , "set_timesteps"):
scheduler.set_timesteps(lowerCAmelCase_)
elif num_inference_steps is not None and not hasattr(lowerCAmelCase_ , "set_timesteps"):
a_ =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
a_ =dummy_past_residuals[:]
a_ =scheduler.step_prk(lowerCAmelCase_ , 0 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =scheduler.step_prk(lowerCAmelCase_ , 1 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ =scheduler.step_plms(lowerCAmelCase_ , 0 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
a_ =scheduler.step_plms(lowerCAmelCase_ , 1 , lowerCAmelCase_ , **lowerCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase_)
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config(steps_offset=1)
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(1_0)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1]) , )
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
self.check_over_forward(num_inference_steps=lowerCAmelCase_)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =2_7
for scheduler_class in self.scheduler_classes:
a_ =self.dummy_sample
a_ =0.1 * sample
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ =scheduler.step_prk(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
def lowercase_ ( self) -> Any:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_):
a_ =self.scheduler_classes[0]
a_ =self.get_scheduler_config()
a_ =scheduler_class(**lowerCAmelCase_)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.full_loop()
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 1_9_8.1_3_1_8) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(prediction_type="v_prediction")
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 6_7.3_9_8_6) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.0_1)
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 2_3_0.0_3_9_9) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5) < 1e-3
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.full_loop(set_alpha_to_one=lowerCAmelCase_ , beta_start=0.0_1)
a_ =torch.sum(torch.abs(lowerCAmelCase_))
a_ =torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 1_8_6.9_4_8_2) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4) < 1e-3
| 41
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
    return len(a_ ) == 9 and set(a_ ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
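    # Project Euler 38: the concatenated product x * (1, 2) of a 4-digit x equals
    # 100002 * x, and x * (1, 2, 3) of a 3-digit x equals 1002003 * x, so only
    # those two ranges need to be searched, largest candidates first.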
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
        a_ =1_0_0_0_0_2 * base_num
        if is_9_pandigital(a_ ):
            return a_
    for base_num in range(3_3_3 , 9_9 , -1 ):
        a_ =1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(a_ ):
            return a_
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = UnCLIPImageVariationPipeline
__magic_name__ : Union[str, Any] = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
__magic_name__ : Dict = IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ : Optional[int] = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
__magic_name__ : Union[str, Any] = False
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return 1_0_0
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowerCAmelCase_)
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowerCAmelCase_)
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
a_ =UnCLIPTextProjModel(**lowerCAmelCase_)
return model
@property
def lowercase_ ( self) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"sample_size": 3_2,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
a_ =UNetaDConditionModel(**lowerCAmelCase_)
return model
@property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
torch.manual_seed(1)
a_ =UNetaDModel(**self.dummy_super_res_kwargs)
return model
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.dummy_decoder
a_ =self.dummy_text_proj
a_ =self.dummy_text_encoder
a_ =self.dummy_tokenizer
a_ =self.dummy_super_res_first
a_ =self.dummy_super_res_last
a_ =UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
a_ =UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1_0_0_0 , )
a_ =CLIPImageProcessor(crop_size=3_2 , size=3_2)
a_ =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0 , lowerCAmelCase_=True) -> List[Any]:
"""simple docstring"""
a_ =floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
if pil_image:
a_ =input_image * 0.5 + 0.5
a_ =input_image.clamp(0 , 1)
a_ =input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ =DiffusionPipeline.numpy_to_pil(lowerCAmelCase_)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ =np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ =np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =[
pipeline_inputs["image"],
pipeline_inputs["image"],
]
a_ =pipe(**lowerCAmelCase_)
a_ =output.images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =[
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
a_ =pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
a_ =np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =torch.device("cpu")
class UpperCAmelCase :
'''simple docstring'''
__magic_name__ : Optional[int] = 1
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(0)
a_ =pipe.decoder.dtype
a_ =1
a_ =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
a_ =pipe.prepare_latents(
lowerCAmelCase_ , dtype=lowerCAmelCase_ , device=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , scheduler=DummyScheduler())
a_ =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
a_ =pipe.prepare_latents(
lowerCAmelCase_ , dtype=lowerCAmelCase_ , device=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , scheduler=DummyScheduler())
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
a_ =pipe(
**lowerCAmelCase_ , decoder_latents=lowerCAmelCase_ , super_res_latents=lowerCAmelCase_).images
a_ =self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_)
# Don't pass image, instead pass embedding
a_ =pipeline_inputs.pop("image")
a_ =pipe.image_encoder(lowerCAmelCase_).image_embeds
a_ =pipe(
**lowerCAmelCase_ , decoder_latents=lowerCAmelCase_ , super_res_latents=lowerCAmelCase_ , image_embeddings=lowerCAmelCase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1e-4
@skip_mps
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =torch_device == "cpu"
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
a_ =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase_ , expected_max_diff=lowerCAmelCase_)
    @skip_mps
    def test_inference_batch_single_identical( self):
        """simple docstring"""
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
    def test_inference_batch_consistent( self):
        """simple docstring"""
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local( self):
        """simple docstring"""
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components( self):
        """simple docstring"""
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
    def tearDown( self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo( self):
        """simple docstring"""
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert_mean_pixel_difference(image , expected_image , 1_5)
| 41
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    '''simple docstring'''

    @property
    def dummy_input( self):
        """simple docstring"""
        return self.get_dummy_input()

    @property
    def output_shape( self):
        """simple docstring"""
        if self.block_type == "down":
            return (4, 3_2, 1_6, 1_6)
        elif self.block_type == "mid":
            return (4, 3_2, 3_2, 3_2)
        elif self.block_type == "up":
            return (4, 3_2, 6_4, 6_4)
        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")

    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ) -> Dict:
        """simple docstring"""
        batch_size = 4
        num_channels = 3_2
        sizes = (3_2, 3_2)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 1_2_8
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 3_2, 3_2)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common( self):
        """simple docstring"""
        init_dict = {
            "in_channels": 3_2,
            "out_channels": 3_2,
            "temb_channels": 1_2_8,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 3_2

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output( self , expected_slice) -> Dict:
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output , Tuple):
            output = output[0]

        self.assertEqual(output.shape , self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3)

    @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
    def test_training( self):
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output , Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape , device=device)
        loss = torch.nn.functional.mse_loss(output , noise)
        loss.backward()
| 41
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True , slots=True)
class _Item(Generic[KEY, VAL]):
    '''simple docstring'''
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    '''simple docstring'''

    def __init__( self) -> None:
        """simple docstring"""
        super().__init__(None , None)

    def __bool__( self) -> bool:
        """simple docstring"""
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    '''simple docstring'''

    def __init__( self , initial_block_size = 8 , capacity_factor = 0.7_5) -> None:
        """simple docstring"""
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key) -> int:
        """simple docstring"""
        return hash(key) % len(self._buckets)

    def _get_next_ind( self , ind) -> int:
        """simple docstring"""
        return (ind + 1) % len(self._buckets)

    def _try_set( self , ind , key , val) -> bool:
        """simple docstring"""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val)
            return True
        else:
            return False

    def _is_full( self) -> bool:
        """simple docstring"""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse( self) -> bool:
        """simple docstring"""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize( self , new_size) -> None:
        """simple docstring"""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val)

    def _size_up( self) -> None:
        """simple docstring"""
        self._resize(len(self._buckets) * 2)

    def _size_down( self) -> None:
        """simple docstring"""
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets( self , key) -> Iterator[int]:
        """simple docstring"""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item( self , key , val) -> None:
        """simple docstring"""
        for ind in self._iterate_buckets(key):
            if self._try_set(ind , key , val):
                break

    def __setitem__( self , key , val) -> None:
        """simple docstring"""
        if self._is_full():
            self._size_up()
        self._add_item(key , val)

    def __delitem__( self , key) -> None:
        """simple docstring"""
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key) -> VAL:
        """simple docstring"""
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__( self) -> int:
        """simple docstring"""
        return self._len

    def __iter__( self) -> Iterator[KEY]:
        """simple docstring"""
        yield from (item.key for item in self._buckets if item)

    def __repr__( self) -> str:
        """simple docstring"""
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item)
        return f"""HashMap({val_string})"""
| 41
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance , src):
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph , distance , edge_count):
    '''simple docstring'''
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph , vertex_count , edge_count , src):
    '''simple docstring'''
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())

    graph = [{} for _ in range(E)]

    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}

    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
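
# A small non-interactive check of bellman_ford on a fixed 4-vertex graph; the
# edge list below is an illustrative example, not part of the original script.
def _demo_bellman_ford() -> None:
    edges = [
        {"src": 0, "dst": 1, "weight": 6},
        {"src": 0, "dst": 3, "weight": 7},
        {"src": 1, "dst": 2, "weight": 5},
        {"src": 3, "dst": 2, "weight": -3},
    ]
    distance = bellman_ford(edges, 4, len(edges), 0)
    # vertex 2 is cheapest via 0 -> 3 -> 2 (7 - 3 = 4), not via 0 -> 1 -> 2 (11)
    assert distance == [0.0, 6.0, 4.0, 7.0]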
| 41
| 1
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    '''simple docstring'''

    def __init__( self , data) -> None:
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing( data) -> bytes:
        """simple docstring"""
        padding = b"\x80" + (b"\x00" * (6_3 - (len(data) + 8) % 6_4))
        big_endian_integer = struct.pack(">Q" , (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash( self) -> None:
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 6_4]
            for x in range(0 , len(self.preprocessed_data) , 6_4)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block))
            # add 48 0-ed integers
            words += [0] * 4_8
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 6_4):
                if index > 1_5:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 1_5] , 7)
                        ^ self.ror(words[index - 1_5] , 1_8)
                        ^ (words[index - 1_5] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 1_7)
                        ^ self.ror(words[index - 2] , 1_9)
                        ^ (words[index - 2] >> 1_0)
                    )
                    words[index] = (
                        words[index - 1_6] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e , 6) ^ self.ror(e , 1_1) ^ self.ror(e , 2_5)
                ch = (e & f) ^ ((~e & 0xffff_ffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a , 2) ^ self.ror(a , 1_3) ^ self.ror(a , 2_2)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror( self , value , rotations) -> int:
        """simple docstring"""
        return 0xffff_ffff & (value << (3_2 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    '''simple docstring'''

    def test_match_hashes( self) -> None:
        """simple docstring"""
        import hashlib

        input_string = bytes("Test String" , "utf-8")
        self.assertEqual(SHA256(input_string).hash , hashlib.sha256(input_string).hexdigest())
def main():
    '''simple docstring'''
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
    main()
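

# A quick cross-check of the class above against hashlib; the sample inputs are
# illustrative and this helper is an added example, not part of the original script.
def _self_check() -> None:
    import hashlib

    for sample in (b"", b"abc", b"Test String"):
        assert SHA256(sample).hash == hashlib.sha256(sample).hexdigest()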
| 41
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
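
# A minimal sketch of seeded, batched sampling with the same pipeline. The fixed
# seed and the four-image batch are assumptions added for illustration; only the
# single-image call above comes from the original snippet.
generator = torch.Generator(device='''cuda''').manual_seed(0)
images = pipe([prompt] * 4, num_inference_steps=50, guidance_scale=7.5, generator=generator).images
for idx, img in enumerate(images):
    img.save(f"""dog-bucket-{idx}.png""")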
| 41
| 1
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('''Googling.....''')
    url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    res = requests.get(url, headers={'''User-Agent''': UserAgent().random})
    # res.raise_for_status()
    with open('''project1a.html''', '''wb''') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    links = list(soup.select('''.eZt8xd'''))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('''href'''))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
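

# A minimal sketch of the lazy-import pattern this module relies on: attributes
# are resolved (and cached) on first access instead of at import time. This is a
# simplified stand-in for transformers' `_LazyModule`, not its actual implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value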
| 41
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : List[Any] = KandinskyVaaImgaImgPipeline
__magic_name__ : Tuple = ["image_embeds", "negative_image_embeds", "image"]
__magic_name__ : Tuple = [
"image_embeds",
"negative_image_embeds",
"image",
]
__magic_name__ : Tuple = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ : Optional[int] = False
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return 1_0_0
@property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
a_ =UNetaDConditionModel(**lowerCAmelCase_)
return model
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
a_ =VQModel(**self.dummy_movq_kwargs)
return model
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.dummy_unet
a_ =self.dummy_movq
a_ ={
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
a_ =DDIMScheduler(**lowerCAmelCase_)
a_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> str:
"""simple docstring"""
a_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
a_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowerCAmelCase_)
# create init_image
a_ =floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase_)).to(lowerCAmelCase_)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((2_5_6, 2_5_6))
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
a_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
a_ =pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =pipe(**self.get_dummy_inputs(lowerCAmelCase_))
a_ =output.images
a_ =pipe(
**self.get_dummy_inputs(lowerCAmelCase_) , return_dict=lowerCAmelCase_ , )[0]
a_ =image[0, -3:, -3:, -1]
a_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a_ =np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy")
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
a_ ="A red cartoon frog, 4k"
a_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase_)
a_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa)
a_ =pipeline.to(lowerCAmelCase_)
pipeline.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =torch.Generator(device="cpu").manual_seed(0)
a_ , a_ =pipe_prior(
lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
a_ =pipeline(
image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
a_ =output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_)
| 41
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def load_vocab_and_emoji(vocab_file , emoji_file):
    '''simple docstring'''
    with open(emoji_file , "r" , encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : str = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
        index = 0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    '''simple docstring'''

    def __init__( self , vocab , ids_to_tokens , emoji):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__( self) -> int:
        """simple docstring"""
        return len(self.ids_to_tokens)
    def clean_text( self , content):
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content)
        content = self.content_repatter2.sub("<EMAIL>" , content)
        content = self.content_repatter3.sub("<TEL>" , content)
        content = self.content_repatter4.sub("<DATE>" , content)
        content = self.content_repatter5.sub("<DATE>" , content)
        content = self.content_repatter6.sub("<PRICE>" , content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
        return content
    def tokenize( self , text , clean=False):
        """simple docstring"""
        text = text.replace(" " , "<SP>")
        text = text.replace(" " , "<SP>")
        text = text.replace("\r\n" , "<BR>")
        text = text.replace("\n" , "<BR>")
        text = text.replace("\r" , "<BR>")
        text = text.replace("\t" , "<TAB>")
        text = text.replace("—" , "ー")
        text = text.replace("−" , "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n"):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
        text = "".join(words)
        return text
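
    # A standalone illustration of the "<|byteN|>" fallback used by tokenize and
    # convert_id_to_token above: unknown characters are split into UTF-8 bytes and
    # reassembled with bytearray().decode(). This helper is an added example.
    @staticmethod
    def _byte_fallback_roundtrip(text):
        tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
        raw = bytearray(int(t[6:-2]) for t in tokens)
        return raw.decode("utf-8" , errors="replace")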
| 41
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path):
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 41
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True).raw)
    return im
def convert_image_processor(model_name):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names):
    '''simple docstring'''
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names , range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3 , 2 , 0 , 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2 , 3 , 0 , 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
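
# A shape-only illustration of the kernel transposition in replace_params: TF
# stores conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W).
# The random tensor below is purely an example.
def _permute_demo() -> None:
    kernel_tf = np.random.rand(3 , 3 , 16 , 32).astype("float32")
    kernel_pt = torch.from_numpy(kernel_tf).permute(3 , 2 , 0 , 1)
    assert tuple(kernel_pt.shape) == (32, 16, 3, 3)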
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub):
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation="softmax" , )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params , tf_params , key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img() , return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x , axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""")
        model_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''b0''',
        type=str,
        help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''hf_model''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase = logging.get_logger(__name__)
def squared_euclidean_distance(a , b):
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a) , axis=1)
    b2 = np.sum(np.square(b) , axis=0)
    ab = np.matmul(a , b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x , clusters):
    '''simple docstring'''
    x = x.reshape(-1 , 3)
    d = squared_euclidean_distance(x , clusters)
    return np.argmin(d , axis=1)
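
# A tiny worked example for the two helpers above: three pixels are snapped to
# the nearest of two color clusters. The arrays are illustrative values only.
def _color_quantize_demo() -> None:
    clusters = np.array([[0, 0, 0], [255, 255, 255]] , dtype=np.float32)
    pixels = np.array([[10, 10, 10], [250, 250, 250], [200, 190, 195]] , dtype=np.float32)
    assert color_quantize(pixels , clusters).tolist() == [0, 1, 1]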
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : List[str] = ["pixel_values"]
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = True , **lowerCAmelCase_ , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =size if size is not None else {"height": 2_5_6, "width": 2_5_6}
a_ =get_size_dict(lowerCAmelCase_)
a_ =np.array(lowerCAmelCase_) if clusters is not None else None
a_ =do_resize
a_ =size
a_ =resample
a_ =do_normalize
a_ =do_color_quantize
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
"""simple docstring"""
a_ =get_size_dict(lowerCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""")
return resize(
lowerCAmelCase_ , size=(size["height"], size["width"]) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , ) -> np.ndarray:
"""simple docstring"""
a_ =rescale(image=lowerCAmelCase_ , scale=1 / 1_2_7.5 , data_format=lowerCAmelCase_)
a_ =image - 1
return image
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> PIL.Image.Image:
"""simple docstring"""
a_ =do_resize if do_resize is not None else self.do_resize
a_ =size if size is not None else self.size
a_ =get_size_dict(lowerCAmelCase_)
a_ =resample if resample is not None else self.resample
a_ =do_normalize if do_normalize is not None else self.do_normalize
a_ =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a_ =clusters if clusters is not None else self.clusters
a_ =np.array(lowerCAmelCase_)
a_ =make_list_of_images(lowerCAmelCase_)
if not valid_images(lowerCAmelCase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
a_ =[to_numpy_array(lowerCAmelCase_) for image in images]
if do_resize:
a_ =[self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_) for image in images]
if do_normalize:
a_ =[self.normalize(image=lowerCAmelCase_) for image in images]
if do_color_quantize:
a_ =[to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a_ =np.array(lowerCAmelCase_)
a_ =color_quantize(lowerCAmelCase_ , lowerCAmelCase_).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
a_ =images.shape[0]
a_ =images.reshape(lowerCAmelCase_ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
a_ =list(lowerCAmelCase_)
else:
a_ =[to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_) for image in images]
a_ ={"input_ids": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_)
| 41
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''ViTFeatureExtractor''']
lowercase = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
def check_bouncy(n) -> bool:
    '''simple docstring'''
    if not isinstance(n , int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent = 9_9) -> int:
    '''simple docstring'''
    if not 0 < percent < 1_0_0:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"""{solution(99)}""")
| 41
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n = 1_0_0_0) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
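
# A quick sanity check: the first Fibonacci term with three digits is the 12th
# (F(12) = 144), so solution(3) should return 12. Added purely for illustration.
def test_three_digit_term() -> None:
    assert solution(3) == 12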
| 41
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 41
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
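
# Illustrative usage sketch (hypothetical values, kept commented out so importing this
# config module stays side-effect free): with the defaults above every 4th encoder layer
# is sparse, because encoder_sparse_step = num_layers // num_sparse_encoder_layers = 12 // 3.
# config = SwitchTransformersConfig()
# assert config.encoder_sparse_step == 4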
| 41
| 1
|
'''simple docstring'''
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n), via the factorial number system."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
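
# Hand-checked example: among the lexicographic permutations of range(4),
# k = 0 yields the identity and k = 6 is the first permutation starting with 1.
assert kth_permutation(0, 4) == [0, 1, 2, 3]
assert kth_permutation(6, 4) == [1, 0, 2, 3]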
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue_with_xla_spawn(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_run_tpu_test(self):
        import xla_spawn

        testargs = "\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 41
| 1
|
'''simple docstring'''
class Graph:
    """Weighted undirected graph with an adjacency-map representation."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge between head and tail with the given weight."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the algorithm picks a unique MST."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return every (tail, head, weight) tuple; each undirected edge appears twice."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind(object):
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
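
# Small worked example (edge weights chosen for illustration, kept commented out so the
# module has no import-time side effects): a 4-vertex cycle with weights 1, 2, 3, 4
# keeps its three lightest edges in the minimum spanning tree.
# g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
# print(Graph.boruvka_mst(g))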
| 41
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
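
# Illustrative usage sketch (hypothetical sizes, commented out to keep the module
# side-effect free): the defaults above describe ALBERT-xxlarge-scale shapes; a tiny
# config for unit tests could shrink the shared layer group like this.
# config = AlbertConfig(hidden_size=256, num_attention_heads=4, intermediate_size=512)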
| 41
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Freeze a module's parameters so it is excluded from backpropagation."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
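
# Minimal usage sketch (the helper names above are reconstructions of the obfuscated
# originals, and the calls are commented out to avoid touching torch at import time):
# device = get_device()
# print(f"[{get_timestamp()}] running on {device}")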
| 41
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] = None) -> int:
    """Return the maximum possible sum amongst all non-empty (not necessarily contiguous) subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
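
# Hand-checked examples: the positive elements sum to 10 in the first case, and with
# all-negative input the best (non-contiguous) subsequence is the single largest element.
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1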
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 1
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 41
|
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number whose base**exponent is largest,
    comparing via exponent * log10(base) instead of computing the huge powers."""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(",")))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result
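
# Hand-checked illustration of the comparison trick used above: 2**10 = 1024 < 3**7 = 2187,
# and the base-10 logarithms agree without ever computing the powers.
assert 10 * log10(2) < 7 * log10(3)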
if __name__ == "__main__":
print(solution())
| 41
| 1
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False,
        )
| 41
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the least non-negative n with n % n1 == r1 and n % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
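
# Hand-checked example: 31 is the least n with n % 5 == 1 and n % 7 == 3, and both
# constructions above agree on it.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31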
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix A and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
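
# Hand-computed sanity check: for the diagonal (hence Hermitian) matrix diag(2, 1) and
# the eigenvector (1, 0)^T, the Rayleigh quotient (v* A v) / (v* v) is the eigenvalue 2.
assert rayleigh_quotient(np.array([[2, 0], [0, 1]]), np.array([[1], [0]])).item() == 2.0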
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 41
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board, row, column):
    """Check that no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    """Place queens row by row, backtracking on conflicts; record and print each full solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41
| 1
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 41
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 41
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Project Euler 42: count how many words in words.txt are triangle words."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
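
# Hand-checked example of the letter-value rule used above: "SKY" -> 19 + 11 + 25 = 55,
# the 10th triangular number, so "SKY" counts as a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS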
if __name__ == "__main__":
print(solution())
| 41
| 1
|
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight order,
    splitting the first item that no longer fits."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
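
# Worked example (hand-computed): with values [60, 100, 120], weights [10, 20, 30] and
# capacity 50, the greedy takes items 0 and 1 whole and 2/3 of item 2 for 240 total.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])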
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 1
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1_3 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=9_9 , lowerCAmelCase_=6_4 , lowerCAmelCase_=3_2 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=3_7 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=1_6 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Optional[int]:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =seq_length
a_ =is_training
a_ =use_input_mask
a_ =use_token_type_ids
a_ =use_labels
a_ =vocab_size
a_ =hidden_size
a_ =embedding_size
a_ =num_hidden_layers
a_ =num_attention_heads
a_ =intermediate_size
a_ =hidden_act
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =type_sequence_label_size
a_ =initializer_range
a_ =num_labels
a_ =num_choices
a_ =scope
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ =None
if self.use_input_mask:
a_ =random_attention_mask([self.batch_size, self.seq_length])
a_ =None
if self.use_token_type_ids:
a_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ =None
a_ =None
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ =ids_tensor([self.batch_size] , self.num_choices)
a_ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =MegatronBertModel(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
a_ =model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_)
a_ =model(lowerCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =MegatronBertForMaskedLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ =MegatronBertForCausalLM(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =MegatronBertForNextSentencePrediction(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =MegatronBertForPreTraining(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =MegatronBertForQuestionAnswering(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =self.num_labels
a_ =MegatronBertForSequenceClassification(lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.num_labels
a_ =MegatronBertForTokenClassification(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
a_ =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =self.num_choices
a_ =MegatronBertForMultipleChoice(config=lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.eval()
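        # Tile each input across the num_choices dimension so every candidate choice is scored independently.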
a_ =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __a , __a , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def lowercase_ ( self , inputs_dict , model_class , return_labels=False) -> str:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_)
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device , )
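# Tolerance shared by the half-precision integration-test comparisons below.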
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip("Model is not available.")
    def lowercase_ ( self) -> Tuple:
        """simple docstring"""
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"] , directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape , expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii , jj , a , b)
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE) , msg=msg)
| 41
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n):
    '''simple docstring'''
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")
def solution():
    '''simple docstring'''
    # For a 4-digit base n, n * 100002 == int(str(n) + str(2 * n)): the concatenated product (1, 2).
    for base_num in range(9999 , 4999 , -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # For a 3-digit base n, n * 1002003 concatenates n, 2n and 3n.
    for base_num in range(333 , 99 , -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 41
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase ( _lowerCamelCase):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ) -> Dict:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size] , 2).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Tuple:
"""simple docstring"""
a_ =FlaubertModel(config=A__)
model.to(A__)
model.eval()
a_ =model(A__ , lengths=A__ , langs=A__)
a_ =model(A__ , langs=A__)
a_ =model(A__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Dict:
"""simple docstring"""
a_ =FlaubertWithLMHeadModel(A__)
model.to(A__)
model.eval()
a_ =model(A__ , token_type_ids=A__ , labels=A__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
a_ =FlaubertForQuestionAnsweringSimple(A__)
model.to(A__)
model.eval()
a_ =model(A__)
a_ =model(A__ , start_positions=A__ , end_positions=A__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =FlaubertForQuestionAnswering(A__)
model.to(A__)
model.eval()
a_ =model(A__)
a_ =model(
A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , p_mask=A__ , )
a_ =model(
A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , )
        (total_loss , ) = result_with_labels.to_tuple()
a_ =model(A__ , start_positions=A__ , end_positions=A__)
        (total_loss , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> List[str]:
"""simple docstring"""
a_ =FlaubertForSequenceClassification(A__)
model.to(A__)
model.eval()
a_ =model(A__)
a_ =model(A__ , labels=A__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[Any]:
"""simple docstring"""
a_ =self.num_labels
a_ =FlaubertForTokenClassification(A__)
model.to(A__)
model.eval()
a_ =model(A__ , attention_mask=A__ , labels=A__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> int:
"""simple docstring"""
a_ =self.num_choices
a_ =FlaubertForMultipleChoice(config=A__)
model.to(A__)
model.eval()
a_ =input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ =model(
A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def lowercase_ ( self , inputs_dict , model_class , return_labels=False) -> List[Any]:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A__)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A__)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*A__)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A__)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A__)
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*A__)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*A__)
@slow
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt") , map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device) , inputs_dict["attention_mask"].to(torch_device))
@require_torch
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> Dict:
"""simple docstring"""
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
    def lowercase_ ( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ) -> Dict:
        """simple docstring"""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device)
        return dummy_input
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def lowercase_ ( self , expected_slice) -> Dict:
        """simple docstring"""
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output , Tuple):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output , Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape , device=device)
        loss = torch.nn.functional.mse_loss(output , noise)
        loss.backward()
| 41
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase ( _snake_case):
'''simple docstring'''
__magic_name__ : int = "levit"
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ) -> str:
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Downsampling ("Subsample") attention blocks inserted between the three stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class UpperCAmelCase ( _snake_case):
'''simple docstring'''
__magic_name__ : int = version.parse("1.11")
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self) -> float:
"""simple docstring"""
return 1e-4
| 701
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance, src):
    '''simple docstring'''
    print(F"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(F"""{i}\t\t{d}""" )
def check_negative_cycle(graph, distance, edge_count):
    '''simple docstring'''
    # One extra relaxation pass: if any edge can still be relaxed, a negative-weight cycle exists.
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph, vertex_count, edge_count, src):
    '''simple docstring'''
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    # Standard Bellman-Ford: relax every edge vertex_count - 1 times.
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
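    # The graph is a flat edge list: one {"src": ..., "dst": ..., "weight": ...} record per edge.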
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class UpperCAmelCase :
'''simple docstring'''
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1_024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    '''simple docstring'''
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
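    # all_metrics accumulates the train/val/test numbers and is dumped to all_results.json at the end.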
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a_ =trainer.evaluate(metric_key_prefix="val" )
a_ =data_args.n_val
a_ =round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index):
    '''simple docstring'''
    main()
if __name__ == "__main__":
main()
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
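# Load the fine-tuned DreamBooth weights in half precision, move the pipeline to GPU, and run one prompt.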
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 41
| 0
|
'''simple docstring'''
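# Conversion factors: each entry is the number of joules in one unit of that energy measure.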
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type, to_type, value):
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
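# Example: energy_conversion("kilowatthour", "joule", 1.0) == 3600000.0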
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class UpperCAmelCase ( pl.Callback):
'''simple docstring'''
    def on_batch_end( self , trainer , pl_module) -> List[str]:
        """simple docstring"""
        lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True) -> None:
        """simple docstring"""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start( self , trainer , pl_module) -> List[Any]:
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end( self , trainer , pl_module) -> Tuple:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , "test")
@rank_zero_only
    def on_validation_end( self , trainer , pl_module) -> Dict:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> List[Any]:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
    def lowercase_ ( self , tokens) -> Optional[Any]:
        """simple docstring"""
        out_string = "".join(tokens).strip()
        return out_string
    def lowercase_ ( self , conversation) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def lowercase_ ( self , save_directory , filename_prefix = None) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , writer)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
'''simple docstring'''
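    # Rule-based Japanese subword tokenizer: text normalization, greedy longest-match lookup
    # against the vocabulary, and a byte-level fallback for out-of-vocabulary characters.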
    def __init__( self , vocab , ids_to_tokens , emoji) -> str:
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
    def lowercase_ ( self , content) -> Any:
        """simple docstring"""
        content = self.content_repatter1.sub("<URL>" , content)
        content = self.content_repatter2.sub("<EMAIL>" , content)
        content = self.content_repatter3.sub("<TEL>" , content)
        content = self.content_repatter4.sub("<DATE>" , content)
        content = self.content_repatter5.sub("<DATE>" , content)
        content = self.content_repatter6.sub("<PRICE>" , content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
        return content
    def lowercase_ ( self , text , clean=False) -> Union[str, Any]:
        """simple docstring"""
        text = text.replace(" " , "<SP>")
        text = text.replace(" " , "<SP>")
        text = text.replace("\r\n" , "<BR>")
        text = text.replace("\n" , "<BR>")
        text = text.replace("\r" , "<BR>")
        text = text.replace("\t" , "<TAB>")
        text = text.replace("—" , "ー")
        text = text.replace("−" , "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def lowercase_ ( self , index , breakline="\n") -> List[Any]:
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
        text = "".join(words)
        return text
| 41
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class UpperCAmelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ =None
if self.use_token_type_ids:
a_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ =None
a_ =None
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ =ids_tensor([self.batch_size] , self.num_choices)
a_ =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
a_ =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =OpenAIGPTModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase)
a_ =model(__lowerCamelCase , token_type_ids=__lowerCamelCase)
a_ =model(__lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =OpenAIGPTLMHeadModel(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =OpenAIGPTDoubleHeadsModel(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =self.num_labels
a_ =OpenAIGPTForSequenceClassification(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ =model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
config_and_inputs =self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) =config_and_inputs
a_ ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__magic_name__ : List[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__magic_name__ : Optional[Any] = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False) -> Dict:
"""simple docstring"""
a_ =super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
if return_labels:
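# OpenAIGPTDoubleHeadsModel takes multiple-choice inputs, so the dummy labels
# gain a num_choices dimension and matching mc_token_ids / mc_labels are built.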
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a_ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , )
a_ =inputs_dict["labels"]
a_ =inputs_dict["labels"]
a_ =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , )
a_ =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase)
return inputs_dict
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =OpenAIGPTModelTester(self)
a_ =ConfigTester(self , config_class=__lowerCamelCase , n_embd=3_7)
def lowercase_ ( self) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase)
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase)
@slow
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ =OpenAIGPTModel.from_pretrained(__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
@require_torch
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.to(__lowerCamelCase)
a_ =torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=__lowerCamelCase) # the president is
a_ =[
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a_ =model.generate(__lowerCamelCase , do_sample=__lowerCamelCase)
self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase)
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
config =EfficientNetConfig()
config.hidden_dim =CONFIG_MAP[model_name]["hidden_dim"]
config.width_coefficient =CONFIG_MAP[model_name]["width_coef"]
config.depth_coefficient =CONFIG_MAP[model_name]["depth_coef"]
config.image_size =CONFIG_MAP[model_name]["image_size"]
config.dropout_rate =CONFIG_MAP[model_name]["dropout_rate"]
config.depthwise_padding =CONFIG_MAP[model_name]["dw_padding"]
repo_id ="huggingface/label-files"
filename ="imagenet-1k-id2label.json"
config.num_labels =1_0_0_0
id2label =json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
id2label ={int(k): v for k, v in id2label.items()}
config.id2label =id2label
config.label2id ={v: k for k, v in id2label.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
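# Map the original TF block ids (e.g. "1a", "2b", ...) onto consecutive HF block
# indices so the rename rules below can address encoder.blocks.{i}.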
rename_keys =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
key_mapping ={}
for item in rename_keys:
if item[0] in original_param_names:
key_mapping[item[0]] ="efficientnet." + item[1]
key_mapping["predictions/kernel:0"] ="classifier.weight"
key_mapping["predictions/bias:0"] ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
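# TF stores conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W);
# depthwise kernels need their own permutation and dense kernels a plain transpose.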
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 41
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase):
'''simple docstring'''
__magic_name__ : Tuple = "nat"
__magic_name__ : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowerCAmelCase_=4 , lowerCAmelCase_=3 , lowerCAmelCase_=6_4 , lowerCAmelCase_=[3, 4, 6, 5] , lowerCAmelCase_=[2, 4, 8, 1_6] , lowerCAmelCase_=7 , lowerCAmelCase_=3.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-5 , lowerCAmelCase_=0.0 , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Dict:
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a_ =patch_size
a_ =num_channels
a_ =embed_dim
a_ =depths
a_ =len(UpperCAmelCase_)
a_ =num_heads
a_ =kernel_size
a_ =mlp_ratio
a_ =qkv_bias
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =drop_path_rate
a_ =hidden_act
a_ =layer_norm_eps
a_ =initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a_ =int(embed_dim * 2 ** (len(UpperCAmelCase_) - 1))
a_ =layer_scale_init_value
a_ =['stem'] + [f"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_) + 1)]
a_ =get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names)
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {"vocab_file": "vocab.txt"}
lowercase = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
lowercase = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" ) as f:
a_ =f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase ( UpperCAmelCase_):
'''simple docstring'''
__magic_name__ : str = VOCAB_FILES_NAMES
__magic_name__ : int = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<cls>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_="<eos>" , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(**_lowercase)
a_ =load_vocab_file(_lowercase)
a_ =dict(enumerate(self.all_tokens))
a_ ={tok: ind for ind, tok in enumerate(self.all_tokens)}
a_ =unk_token
a_ =cls_token
a_ =pad_token
a_ =mask_token
a_ =eos_token
a_ =self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> str:
"""simple docstring"""
return self._id_to_token.get(_lowercase , self.unk_token)
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
return self._token_to_id.get(_lowercase , self._token_to_id.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_ , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
return text.split()
def lowercase_ ( self , lowerCAmelCase_=False) -> List[Any]:
"""simple docstring"""
return len(self._id_to_token)
def lowercase_ ( self) -> Any:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
return self._token_to_id.get(_lowercase , self._token_to_id.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> str:
"""simple docstring"""
return self._id_to_token.get(_lowercase , self.unk_token)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[int]:
"""simple docstring"""
cls =[self.cls_token_id]
sep =[self.eos_token_id] # No sep token in ESM vocabulary
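# Single sequences become <cls> seq <eos>; pairs are only possible when an EOS
# token exists, since the ESM vocabulary has no dedicated separator token.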
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
mask =[1] + ([0] * len(_lowercase)) + [1]
if token_ids_a is not None:
mask += [0] * len(_lowercase) + [1]
return mask
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
vocab_file =os.path.join(_lowercase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
with open(vocab_file , "w") as f:
f.write("\n".join(self.all_tokens))
return (vocab_file,)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=_lowercase)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False) -> int:
"""simple docstring"""
return super()._add_tokens(_lowercase , special_tokens=_lowercase)
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def UpperCAmelCase_ ( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ =[tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCAmelCase ( __A , __A , __A , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = StableDiffusionLatentUpscalePipeline
__magic_name__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
__magic_name__ : List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
__magic_name__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ : int = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__magic_name__ : Dict = frozenset([])
__magic_name__ : str = True
@property
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =1
a_ =4
a_ =(1_6, 1_6)
a_ =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowerCAmelCase_)
return image
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
a_ =UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=lowerCAmelCase_ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=lowerCAmelCase_ , only_cross_attention=lowerCAmelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
a_ =AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
a_ =EulerDiscreteScheduler(prediction_type="sample")
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="quick_gelu" , projection_dim=5_1_2 , )
a_ =CLIPTextModel(lowerCAmelCase_)
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
a_ ={
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> List[str]:
"""simple docstring"""
if str(lowerCAmelCase_).startswith("mps"):
a_ =torch.manual_seed(lowerCAmelCase_)
else:
a_ =torch.Generator(device=lowerCAmelCase_).manual_seed(lowerCAmelCase_)
a_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ ='''cpu'''
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_)
a_ =pipe(**lowerCAmelCase_).images
a_ =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3))
a_ =np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
a_ =np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCAmelCase_ , 1e-3)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
def lowercase_ ( self) -> int:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
def lowercase_ ( self) -> int:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3)
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =[
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**lowerCAmelCase_)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=lowerCAmelCase_)
pipe.to(lowerCAmelCase_)
pipe.set_progress_bar_config(disable=lowerCAmelCase_)
a_ =self.get_dummy_inputs(lowerCAmelCase_)
a_ =2
a_ =[]
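# Run the pipeline once per compatible Karras scheduler and verify that every
# scheduler produces an output of the same shape.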
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# these schedulers are not supported by the latent upscaler pipeline, so skip them
continue
a_ =getattr(lowerCAmelCase_ , scheduler_enum.name)
a_ =scheduler_cls.from_config(pipe.scheduler.config)
a_ =pipe(**lowerCAmelCase_)[0]
outputs.append(lowerCAmelCase_)
assert check_same_shape(lowerCAmelCase_)
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =torch.manual_seed(3_3)
a_ =StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa)
pipe.to("cuda")
a_ =StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa)
upscaler.to("cuda")
a_ ='''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
a_ =pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="latent").images
a_ =upscaler(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=2_0 , guidance_scale=0 , generator=lowerCAmelCase_ , output_type="np" , ).images[0]
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
assert np.abs((expected_image - image).mean()) < 5e-2
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =torch.manual_seed(3_3)
a_ =StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa)
upscaler.to("cuda")
a_ ='''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
a_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")
a_ =upscaler(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=2_0 , guidance_scale=0 , generator=lowerCAmelCase_ , output_type="np" , ).images[0]
a_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
assert np.abs((expected_image - image).max()) < 5e-2
| 708
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
'''simple docstring'''
a , b =0, 1
while True:
a , b =b, a + b
yield b
def solution(n: int = 1_0_0_0) -> int:
'''simple docstring'''
answer =1
gen =fibonacci_generator()
while len(str(next(gen))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 41
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
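# Expert kernels are 3-D (one (d_in, d_out) matrix per expert) and get their last
# two axes swapped; plain 2-D kernels are transposed to torch.nn.Linear's (out, in).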
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a_ =flax_key_tuple[:-1] + ("weight",)
a_ =torch.permute(__lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ):
# linear layer
a_ =flax_key_tuple[:-1] + ("weight",)
a_ =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a_ =flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if "metadata" in layer:
a_ =layer.split("metadata" )
a_ ="".join(split_layer[0] )[:-1]
a_ =[tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
a_ =layer.split("kvstore" )
a_ ="".join(split_layer[0] )[:-1]
a_ =[tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
a_ =layer.split("/" )
a_ ="/".join(split_layer[:-1] )
a_ =(split_layer[-1],)
if "kvstore/path" in layer:
a_ =F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
a_ ="file"
else:
a_ =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =rename_keys(__lowerCAmelCase )
a_ ={}
for k, v in current_block.items():
a_ =v
a_ =new_current_block
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = WEIGHTS_NAME ):
'''simple docstring'''
a_ =convert_file_size_to_int(__lowerCAmelCase )
a_ =[]
a_ ={}
a_ =0
a_ =0
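# sharded_state_dicts records the keys stored in each shard; current_block holds
# the tensors of the shard being filled and total_size tracks the overall bytes.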
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
a_ =serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
a_ =flatten_dict(__lowerCAmelCase , sep="/" )
a_ ={}
for layer in checkpoint_info.keys():
a_ , a_ , a_ =get_key_and_tensorstore_dict(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if curr_real_layer_name in all_layers:
a_ =content
else:
a_ ={split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a_ =ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a_ =torch.tensor(__lowerCAmelCase )
a_ =raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a_ , a_ =rename_base_flax_keys(tuple(key.split("/" ) ) , __lowerCAmelCase )
a_ ="/".join(__lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a_ =os.path.join(
__lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a_ ={}
a_ =0
a_ =raw_weights.to(getattr(__lowerCAmelCase , __lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a_ =os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(__lowerCAmelCase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a_ ={}
a_ ={}
for idx, shard in enumerate(__lowerCAmelCase ):
a_ =weights_name.replace(
".bin" , F"""-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin""" ) # len(sharded_state_dicts):05d}
a_ =os.path.join(__lowerCAmelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
a_ =shard
for key in shard:
a_ =shard_file
# Add the metadata
a_ ={"total_size": total_size}
a_ ={"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
a_ =json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
lowercase = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCAmelCase_ ( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
a_ =SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
a_ =SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
a_ =TaTokenizer.from_pretrained("t5-small" )
a_ ="A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
a_ =tokenizer(__lowerCAmelCase , return_tensors="pt" ).input_ids
a_ =model.generate(__lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "switch_transformers"
__magic_name__ : List[Any] = ["past_key_values"]
__magic_name__ : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , lowerCAmelCase_=3_2_1_2_8 , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=2_0_4_8 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_2 , lowerCAmelCase_=8 , lowerCAmelCase_=False , lowerCAmelCase_=0.0_1 , lowerCAmelCase_="float32" , lowerCAmelCase_=False , lowerCAmelCase_=3_2 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1e-6 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=0.0_0_1 , lowerCAmelCase_=1.0 , lowerCAmelCase_="relu" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0 , lowerCAmelCase_=1 , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =vocab_size
a_ =d_model
a_ =d_kv
a_ =d_ff
a_ =num_sparse_encoder_layers
a_ =num_layers
a_ =(
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ =num_sparse_decoder_layers
# Every how many encoder layers a sparse (mixture-of-experts) layer is placed.
if self.num_sparse_encoder_layers > 0:
a_ =self.num_layers // self.num_sparse_encoder_layers
else:
a_ =self.num_layers # HACK: this will create 0 sparse layers
# Every how many decoder layers a sparse (mixture-of-experts) layer is placed.
if self.num_sparse_decoder_layers > 0:
a_ =self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ =self.num_decoder_layers # HACK: this will create 0 sparse layers
a_ =num_heads
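# Mixture-of-experts routing: experts per sparse layer, how many tokens each
# expert may process, and optional router bias / jitter noise.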
a_ =num_experts
a_ =expert_capacity
a_ =router_bias
a_ =router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
a_ =router_dtype
a_ =router_ignore_padding_tokens
a_ =relative_attention_num_buckets
a_ =relative_attention_max_distance
a_ =dropout_rate
a_ =layer_norm_epsilon
a_ =initializer_factor
a_ =feed_forward_proj
a_ =use_cache
a_ =add_router_probs
a_ =router_z_loss_coef
a_ =router_aux_loss_coef
a_ =self.feed_forward_proj.split("-")
a_ =act_info[-1]
a_ =act_info[0] == "gated"
if len(lowerCAmelCase_) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ ="gelu_new"
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 41
| 0
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__="attention" ):
'''simple docstring'''
a_ =params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
a_ =params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
a_ =params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
a_ =params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ):
'''simple docstring'''
if split_mlp_wi:
a_ =params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
a_ =params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
a_ =(wi_a, wi_a)
else:
a_ =params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
a_ =params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def UpperCAmelCase_ ( lowercase__ , *, lowercase__ , lowercase__ ):
'''simple docstring'''
old =traverse_util.flatten_dict(variables["target"] )
old ={"/".join(k): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a_ ="encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , lowerCamelCase__ )
a_ =collections.OrderedDict()
# Shared embeddings.
a_ =old["token_embedder/embedding"]
# Encoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
a_ =tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_attention_layer_norm" )
a_ =tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "attention" )
a_ =layer_norm
a_ =k.T
a_ =o.T
a_ =q.T
a_ =v.T
# Block i, layer 1 (MLP).
a_ =tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_mlp_layer_norm" )
a_ =tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , lowerCamelCase__ )
a_ =layer_norm
if split_mlp_wi:
a_ =wi[0].T
a_ =wi[1].T
else:
a_ =wi.T
a_ =wo.T
a_ =old[
"encoder/relpos_bias/rel_embedding"
].T
a_ =old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
a_ =tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_self_attention_layer_norm" )
a_ =tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "self_attention" )
a_ =layer_norm
a_ =k.T
a_ =o.T
a_ =q.T
a_ =v.T
# Block i, layer 1 (Cross Attention).
a_ =tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_cross_attention_layer_norm" )
a_ =tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "encoder_decoder_attention" )
a_ =layer_norm
a_ =k.T
a_ =o.T
a_ =q.T
a_ =v.T
# Block i, layer 2 (MLP).
a_ =tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_mlp_layer_norm" )
a_ =tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , lowerCamelCase__ )
a_ =layer_norm
if split_mlp_wi:
a_ =wi[0].T
a_ =wi[1].T
else:
a_ =wi.T
a_ =wo.T
a_ =old["decoder/decoder_norm/scale"]
a_ =old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a_ =old["decoder/logits_dense/kernel"].T
return new
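# make_state_dict wraps the converted arrays in torch tensors and falls back to
# the shared embedding matrix for any missing embed_tokens / lm_head weights.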
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a_ =state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a_ =state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
a_ =state_dict["shared.weight"]
return state_dict
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =checkpoints.load_tax_checkpoint(lowerCamelCase__ )
a_ =convert_tax_to_pytorch(lowerCamelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase__ )
a_ =make_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
'''simple docstring'''
a_ =TaConfig.from_json_file(lowerCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a_ =TaEncoderModel(lowerCamelCase__ )
else:
a_ =TaForConditionalGeneration(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCamelCase__ )
print("Done" )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
lowercase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
results ={}
path =os.path.join(lowercase__ , "all_results.json" )
if os.path.exists(path):
with open(path , "r" ) as f:
results =json.load(f)
else:
raise ValueError(F"""can't find {path}""" )
return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
tmp_dir =self.get_auto_remove_tmp_dir()
testargs =f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(sys , "argv" , testargs):
start =time()
xla_spawn.main()
end =time()
result =get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Optional[int] = KandinskyVaaPriorPipeline
__magic_name__ : Dict = ["prompt"]
__magic_name__ : List[str] = ["prompt", "negative_prompt"]
__magic_name__ : Union[str, Any] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
__magic_name__ : Union[str, Any] = False
@property
def lowercase_ ( self) -> str:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
return 3_2
@property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
return 1_0_0
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__A)
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
torch.manual_seed(0)
a_ ={
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
a_ =PriorTransformer(**__A)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a_ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
a_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
a_ =CLIPVisionModelWithProjection(__A)
return model
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__A , do_normalize=__A , do_resize=__A , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , )
return image_processor
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.dummy_prior
a_ =self.dummy_image_encoder
a_ =self.dummy_text_encoder
a_ =self.dummy_tokenizer
a_ =self.dummy_image_processor
a_ =UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=__A , clip_sample_range=1_0.0 , )
a_ ={
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=0) -> Tuple:
"""simple docstring"""
if str(__A).startswith("mps"):
a_ =torch.manual_seed(__A)
else:
a_ =torch.Generator(device=__A).manual_seed(__A)
a_ ={
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ ="cpu"
a_ =self.get_dummy_components()
a_ =self.pipeline_class(**__A)
a_ =pipe.to(__A)
pipe.set_progress_bar_config(disable=__A)
a_ =pipe(**self.get_dummy_inputs(__A))
a_ =output.image_embeds
a_ =pipe(
**self.get_dummy_inputs(__A) , return_dict=__A , )[0]
a_ =image[0, -1_0:]
a_ =image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
a_ =np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =torch_device == "cpu"
a_ =True
a_ =False
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , test_mean_pixel_difference=__A , )
@skip_mps
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =torch_device == "cpu"
a_ =False
self._test_attention_slicing_forward_pass(
test_max_difference=__A , test_mean_pixel_difference=__A , )
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class UpperCAmelCase ( __a):
'''simple docstring'''
__magic_name__ : int = "albert"
def __init__( self , lowerCAmelCase_=3_0_0_0_0 , lowerCAmelCase_=1_2_8 , lowerCAmelCase_=4_0_9_6 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1 , lowerCAmelCase_=6_4 , lowerCAmelCase_=1_6_3_8_4 , lowerCAmelCase_=1 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=5_1_2 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1e-12 , lowerCAmelCase_=0.1 , lowerCAmelCase_="absolute" , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , **lowerCAmelCase_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
a_ =vocab_size
a_ =embedding_size
a_ =hidden_size
a_ =num_hidden_layers
a_ =num_hidden_groups
a_ =num_attention_heads
a_ =inner_group_num
a_ =hidden_act
a_ =intermediate_size
a_ =hidden_dropout_prob
a_ =attention_probs_dropout_prob
a_ =max_position_embeddings
a_ =type_vocab_size
a_ =initializer_range
a_ =layer_norm_eps
a_ =classifier_dropout_prob
a_ =position_embedding_type
class UpperCAmelCase ( __a):
'''simple docstring'''
@property
def lowercase_ ( self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
a_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
])
| 41
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class UpperCAmelCase ( UpperCamelCase_):
'''simple docstring'''
__magic_name__ : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True})
__magic_name__ : ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
__magic_name__ : ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
})
})
__magic_name__ : str = "question"
__magic_name__ : str = "context"
__magic_name__ : str = "answers"
@property
def lowercase_ ( self) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase_ ( lowercase__ = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError("Input sequence should not be empty" )
a_ =nums[0]
for i in range(1 , len(lowercase__ ) ):
a_ =nums[i]
a_ =max(ans , ans + num , num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
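# A minimal sanity sketch for the helper above, assuming it is exposed as
# `max_subsequence_sum` (the name the demo block uses). Subsequences need
# not be contiguous, so with mixed signs the best sum is the sum of the
# positive entries, and with all-negative input it is the largest entry:
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-10, -2, -5]) == -2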
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def UpperCAmelCase_ ( lowercase__ = 1_0_0 ):
'''simple docstring'''
a_ =1
a_ =2
for i in range(2 , max_n + 1 ):
a_ =pre_numerator
a_ =2 * i // 3 if i % 3 == 0 else 1
a_ =cur_numerator
a_ =e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
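# A worked check, assuming the two helpers keep the names `sum_digits` and
# `solution` used by the print above. The convergents of e have numerators
# 2, 3, 8, 11, 19, 87, ...; the tenth numerator is 1457, with digit sum 17:
assert sum_digits(1457) == 1 + 4 + 5 + 7 == 17
assert solution(10) == 17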
| 713
|
'''simple docstring'''
import os
from math import logaa
def UpperCAmelCase_ ( lowercase__ = "base_exp.txt" ):
'''simple docstring'''
a_ =0
a_ =0
for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
a_ , a_ =list(map(int , line.split("," ) ) )
if x * logaa(a ) > largest:
a_ =x * logaa(a )
a_ =i + 1
return result
if __name__ == "__main__":
print(solution())
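# The key trick above, shown standalone: a**b is astronomically large, but
# log10 is monotonic, so comparing b * log10(a) orders the pairs exactly as
# comparing a**b would. For example 2**11 = 2048 < 3**7 = 2187, even though
# 2 < 3 and 11 > 7:
from math import log10
assert 11 * log10(2) < 7 * log10(3)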
| 41
| 0
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __a , unittest.TestCase):
'''simple docstring'''
__magic_name__ : int = FunnelTokenizer
__magic_name__ : List[Any] = FunnelTokenizerFast
__magic_name__ : List[str] = True
__magic_name__ : int = True
def lowercase_ ( self) -> int:
"""simple docstring"""
super().setUp()
a_ =[
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def lowercase_ ( self , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def lowercase_ ( self , **lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="UNwant\u00E9d,running"
a_ ="unwanted, running"
return input_text, output_text
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =self.tokenizer_class(self.vocab_file)
a_ =tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(_UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , [7, 4, 5, 1_0, 8, 9])
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =self.get_tokenizers(do_lower_case=_UpperCAmelCase)
for tokenizer in tokenizers:
a_ =tokenizer("UNwant\u00E9d,running")
a_ =len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
a_ =tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len)
| 714
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((a_) , (a_)) =extended_euclid(b , a % b )
a_ =a // b
return (y, x - k * y)
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(n1 , n2 )
a_ =n1 * n2
a_ =r2 * x * n1 + r1 * y * n2
return (n % m + m) % m
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((a_) , (a_)) =extended_euclid(a , n )
if b < 0:
a_ =(b % n + n) % n
return b
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ , a_ =invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
a_ =n1 * n2
a_ =r2 * x * n1 + r1 * y * n2
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
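# A quick sanity sketch using the public names exercised by the testmod
# calls above. x = 16 satisfies x % 5 == 1 and x % 7 == 2, and 5 is the
# inverse of 3 modulo 7 because 3 * 5 = 15 = 2 * 7 + 1:
assert chinese_remainder_theorem(5, 1, 7, 2) == 16
assert chinese_remainder_theorem2(5, 1, 7, 2) == 16
assert invert_modulo(3, 7) == 5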
| 41
| 0
|
'''simple docstring'''
import numpy as np
import datasets
lowercase = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
lowercase = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
lowercase = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase ( datasets.Metric):
'''simple docstring'''
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence") , id="X"),
}) , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ =np.array(X )
a_ =np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape) != 2:
raise ValueError("Expected `X` to be a 2D vector")
if len(reference_distribution.shape) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector")
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
# Get mahalanobis distance for each prediction
a_ =X - np.mean(reference_distribution )
a_ =np.cov(reference_distribution.T)
try:
a_ =np.linalg.inv(cov )
except np.linalg.LinAlgError:
a_ =np.linalg.pinv(cov )
a_ =np.dot(X_minus_mu , inv_covariance )
a_ =np.dot(left_term , X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =v.conjugate().T
a_ =v_star.dot(a )
assert isinstance(v_star_dot , np.ndarray )
return (v_star_dot.dot(v )) / (v_star.dot(v ))
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a_ =np.array([[1], [2], [3]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
print(rayleigh_quotient(a , v ) )
a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
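# Standalone sketch of the quantity under test: for a Hermitian matrix A,
# the Rayleigh quotient (v* A v) / (v* v) is real and always lies between
# the smallest and largest eigenvalues of A.
import numpy as np
A = np.array([[2.0, 0.0], [0.0, 5.0]])  # Hermitian, eigenvalues 2 and 5
v = np.array([[1.0], [1.0]])
r = (v.T @ A @ v).item() / (v.T @ v).item()
assert r == 3.5  # (2 + 5) / 2, squarely between 2 and 5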
| 41
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = '''▁'''
lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowercase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
lowercase = {
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
lowercase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase ( a__):
'''simple docstring'''
__magic_name__ : str = VOCAB_FILES_NAMES
__magic_name__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : List[Any] = ["input_ids", "attention_mask"]
__magic_name__ : Optional[int] = []
__magic_name__ : str = []
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> int:
"""simple docstring"""
a_ =AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
a_ ={} if sp_model_kwargs is None else sp_model_kwargs
a_ =legacy_behaviour
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_A , **_A , )
a_ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_A))
a_ =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
a_ ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a_ =1
a_ =len(self.sp_model)
a_ ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
}
a_ ={v: k for k, v in self.lang_code_to_id.items()}
a_ =len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
a_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
a_ =list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
a_ =src_lang if src_lang is not None else 'eng_Latn'
a_ =self.lang_code_to_id[self._src_lang]
a_ =tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> List[Any]:
"""simple docstring"""
a_ =self.__dict__.copy()
a_ =None
a_ =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase_) -> Dict:
"""simple docstring"""
a_ =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a_ ={}
a_ =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowercase_ ( self , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False) -> Optional[Any]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
a_ =[1] * len(self.prefix_tokens)
a_ =[1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(_A)) + suffix_ones
return prefix_ones + ([0] * len(_A)) + ([0] * len(_A)) + suffix_ones
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> str:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> List[Any]:
"""simple docstring"""
a_ =[self.sep_token_id]
a_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Tuple:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
a_ =src_lang
a_ =self(_A , add_special_tokens=_A , return_tensors=_A , **_A)
a_ =self.convert_tokens_to_ids(_A)
a_ =tgt_lang_id
return inputs
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
return self.sp_model.encode(text , out_type=str)
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ =self.sp_model.PieceToId(_A)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def lowercase_ ( self , lowerCAmelCase_) -> Union[str, Any]:
"""simple docstring"""
a_ ="".join(tokens).replace(SPIECE_UNDERLINE , " ").strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple:
"""simple docstring"""
if not os.path.isdir(_A):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a_ =os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , "wb") as fi:
a_ =self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = "eng_Latn" , lowerCAmelCase_ = None , lowerCAmelCase_ = "fra_Latn" , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
a_ =src_lang
a_ =tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A)
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def lowercase_ ( self) -> str:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
a_ =[]
a_ =[self.eos_token_id, self.cur_lang_code]
else:
a_ =[self.cur_lang_code]
a_ =[self.eos_token_id]
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ =self.lang_code_to_id[lang]
if self.legacy_behaviour:
a_ =[]
a_ =[self.eos_token_id, self.cur_lang_code]
else:
a_ =[self.cur_lang_code]
a_ =[self.eos_token_id]
| 716
|
'''simple docstring'''
from __future__ import annotations
lowercase = []
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for i in range(len(board ) ):
if board[row][i] == 1:
return False
for i in range(len(board ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if row >= len(board ):
solution.append(board )
printboard(board )
print()
return True
for i in range(len(board ) ):
if is_safe(board , row , i ):
board[row][i] = 1
solve(board , row + 1 )
board[row][i] = 0
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for i in range(len(board ) ):
for j in range(len(board ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
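# Independent cross-check of the backtracking above (a sketch that
# re-counts solutions via permutations rather than reading the module's
# mutable global list): fixing one queen per row and column leaves only
# the two diagonal directions to rule out.
from itertools import permutations

def count_queen_solutions(n: int) -> int:
    return sum(
        len({c + r for r, c in enumerate(p)}) == n
        and len({c - r for r, c in enumerate(p)}) == n
        for p in permutations(range(n))
    )

assert count_queen_solutions(4) == 2
assert count_queen_solutions(8) == 92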
| 41
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=3 , lowerCAmelCase_=3_2 , lowerCAmelCase_=3 , lowerCAmelCase_=1_0 , lowerCAmelCase_=[8, 1_6, 3_2, 6_4] , lowerCAmelCase_=[1, 1, 2, 1] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=["stage2", "stage3", "stage4"] , lowerCAmelCase_=[2, 3, 4] , lowerCAmelCase_=1 , ) -> int:
"""simple docstring"""
a_ =parent
a_ =batch_size
a_ =image_size
a_ =num_channels
a_ =embeddings_size
a_ =hidden_sizes
a_ =depths
a_ =is_training
a_ =use_labels
a_ =hidden_act
a_ =num_labels
a_ =scope
a_ =len(__lowerCamelCase)
a_ =out_features
a_ =out_indices
a_ =num_groups
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ =None
if self.use_labels:
a_ =ids_tensor([self.batch_size] , self.num_labels)
a_ =self.get_config()
return config, pixel_values, labels
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =BitModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
a_ =self.num_labels
a_ =BitForImageClassification(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase , labels=__lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =BitBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
a_ =None
a_ =BitBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
a_ =model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =self.prepare_config_and_inputs()
a_ =config_and_inputs
a_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _A , _A , unittest.TestCase):
'''simple docstring'''
__magic_name__ : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__magic_name__ : List[Any] = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ : Tuple = False
__magic_name__ : Optional[Any] = False
__magic_name__ : List[str] = False
__magic_name__ : Optional[int] = False
__magic_name__ : Optional[Any] = False
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =BitModelTester(self)
a_ =ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions")
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings")
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
pass
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(__lowerCamelCase)
a_ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ =[*signature.parameters.keys()]
a_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(config=__lowerCamelCase)
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
a_ =model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
a_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ =self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase) , expected_num_stages + 1)
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ =self.model_tester.prepare_config_and_inputs_for_common()
a_ =["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ =layer_type
a_ =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ =True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
@unittest.skip(reason="Bit does not use feedforward chunking")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase)
@slow
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ =BitModel.from_pretrained(__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
)
@slow
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__lowerCamelCase)
a_ =self.default_image_processor
a_ =prepare_img()
a_ =image_processor(images=__lowerCamelCase , return_tensors="pt").to(__lowerCamelCase)
# forward pass
with torch.no_grad():
a_ =model(**__lowerCamelCase)
# verify the logits
a_ =torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __lowerCamelCase)
a_ =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4))
@require_torch
class UpperCAmelCase ( _A , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Dict = (BitBackbone,) if is_torch_available() else ()
__magic_name__ : int = BitConfig
__magic_name__ : Any = False
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ =BitModelTester(self)
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(masked_token , predicted_token ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 0
|
'''simple docstring'''
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return 1
# compute the half power once so the recursion performs O(log b) multiplications
a_ =actual_power(a , int(b / 2 ) )
if (b % 2) == 0:
return half * half
return a * half * half
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b < 0:
return 1 / actual_power(a_ , a_ )
return actual_power(a_ , a_ )
if __name__ == "__main__":
print(power(-2, -3))
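# Behavior sketch for the wrappers above, using the name `power` from the
# demo call: squaring halves the exponent at each step, and a negative
# exponent falls back to 1 / a**|b|.
assert power(2, 10) == 1024
assert power(5, 0) == 1
assert power(-2, -3) == -0.125  # the value the print above produces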
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
for char in word:
a_ =ord(_A )
if not _is_chinese_char(_A ):
return 0
return 1
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =set()
for token in tokens:
a_ =len(_A ) > 1 and is_chinese(_A )
if chinese_word:
word_set.add(_A )
a_ =list(_A )
return word_list
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
a_ =max([len(w ) for w in chinese_word_set] )
a_ =bert_tokens
a_ , a_ =0, len(bert_word )
while start < end:
a_ =True
if is_chinese(bert_word[start] ):
a_ =min(end - start , _A )
for i in range(_A , 1 , -1 ):
a_ ="".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ ="##" + bert_word[j]
a_ =start + i
a_ =False
break
if single_word:
start += 1
return bert_word
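# Behavior sketch for the helper above (prepare_ref below calls it as
# `add_sub_symbol`): given the whole words found by LTP segmentation, it
# rewrites BERT's per-character pieces so that continuation characters
# inside a segmented word get the "##" prefix used by whole-word masking:
assert add_sub_symbol(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]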
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =[]
for i in range(0 , len(lines ) , 1_0_0 ):
a_ =ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
a_ =[get_chinese_word(r ) for r in res]
ltp_res.extend(_A )
assert len(_A ) == len(_A )
a_ =[]
for i in range(0 , len(lines ) , 1_0_0 ):
a_ =bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_A , truncation=_A , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(_A ) == len(_A )
a_ =[]
for input_ids, chinese_word in zip(_A , _A ):
a_ =[]
for id in input_ids:
a_ =bert_tokenizer._convert_id_to_token(_A )
input_tokens.append(_A )
a_ =add_sub_symbol(_A , _A )
a_ =[]
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(_A ):
if token[:2] == "##":
a_ =token[2:]
# save chinese tokens' pos
if len(_A ) == 1 and _is_chinese_char(ord(_A ) ):
ref_id.append(_A )
ref_ids.append(_A )
assert len(_A ) == len(_A )
return ref_ids
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
a_ =f.readlines()
a_ =[line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
a_ =LTP(args.ltp ) # faster in GPU device
a_ =BertTokenizer.from_pretrained(args.bert )
a_ =prepare_ref(_A , _A , _A )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
a_ =[json.dumps(_A ) + "\n" for ref in ref_ids]
f.writelines(_A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
lowercase = parser.parse_args()
main(args)
| 719
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ =os.path.dirname(os.path.realpath(lowercase__ ) )
a_ =os.path.join(lowercase__ , "words.txt" )
a_ =""
with open(lowercase__ ) as f:
a_ =f.readline()
a_ =[word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
a_ =[
word
for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowercase__ )
if __name__ == "__main__":
print(solution())
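# The scoring rule above in isolation: each uppercase letter maps to its
# alphabet position (ord(ch) - 64), and a word counts when its score is a
# triangular number t_n = n * (n + 1) / 2. "SKY" is the classic example:
assert sum(ord(ch) - 64 for ch in "SKY") == 19 + 11 + 25 == 55
assert 55 == 10 * 11 // 2  # t_10, so "SKY" is a triangular word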
| 41
| 0
|
'''simple docstring'''
lowercase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowercase = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert len(str(lowercase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
a_ =year // 1_0_0
a_ =(5 * (century % 4) + 2) % 7
a_ =year % 1_0_0
a_ =centurian % 1_2
a_ =(
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
a_ =(
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)  # century years are leap only when divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
a_ =(dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
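# Worked instances, assuming the entry point is exposed as `get_week_day`.
# 2000 is a leap year (divisible by 400), so its January doomsday is the
# 4th; the doomsday weekday for 2000 is Tuesday, which puts Jan 1 on a
# Saturday.
assert get_week_day(2000, 1, 1) == "Saturday"
assert get_week_day(2023, 6, 19) == "Monday"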
| 720
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
set_seed(770)
lowercase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
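# Tiny illustration of the rename table above (assuming it is bound as
# `new_layer_name_dict`, the name the checkpoint-fixup loop below uses):
# a raw Bark key is rewritten by successive substring replacements.
_k = "transformer.h.0.attn.c_attn.weight"
for _old in new_layer_name_dict:
    _k = _k.replace(_old, new_layer_name_dict[_old])
assert _k == "layers.0.attn.att_proj.weight"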
lowercase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
lowercase = os.path.dirname(os.path.abspath(__file__))
lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def UpperCAmelCase_ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
a_ =model_type
if use_small:
key += "_small"
return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
hf_hub_download(repo_id=lowercase__ , filename=lowercase__ , local_dir=lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type == "text":
a_ =BarkSemanticModel
a_ =BarkSemanticConfig
a_ =BarkSemanticGenerationConfig
elif model_type == "coarse":
a_ =BarkCoarseModel
a_ =BarkCoarseConfig
a_ =BarkCoarseGenerationConfig
elif model_type == "fine":
a_ =BarkFineModel
a_ =BarkFineConfig
a_ =BarkFineGenerationConfig
else:
raise NotImplementedError()
a_ =F"""{model_type}_small""" if use_small else model_type
a_ =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase__ ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
a_ =torch.load(lowercase__ , map_location=lowercase__ )
# this is a hack
a_ =checkpoint["model_args"]
if "input_vocab_size" not in model_args:
a_ =model_args["vocab_size"]
a_ =model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
a_ =model_args.pop("n_head" )
a_ =model_args.pop("n_embd" )
a_ =model_args.pop("n_layer" )
a_ =ConfigClass(**checkpoint["model_args"] )
a_ =ModelClass(config=lowercase__ )
a_ =GenerationConfigClass()
a_ =model_generation_config
a_ =checkpoint["model"]
# fixup checkpoint
a_ ="_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(unwanted_prefix ):
# replace part of the key with the corresponding layer name in the HF implementation
a_ =k[len(unwanted_prefix ) :]
for old_layer_name in new_layer_name_dict:
a_ =new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
state_dict[new_k] = state_dict.pop(k )
a_ =set(state_dict.keys() ) - set(model.state_dict().keys() )
a_ ={k for k in extra_keys if not k.endswith(".attn.bias" )}
a_ =set(model.state_dict().keys() ) - set(state_dict.keys() )
a_ ={k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(lowercase__ ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(lowercase__ , strict=lowercase__ )
a_ =model.num_parameters(exclude_embeddings=lowercase__ )
a_ =checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
model.eval()
model.to(lowercase__ )
del checkpoint, state_dict
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__=False , lowercase__="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
a_ ="cpu" # do conversion on cpu
a_ =_get_ckpt_path(lowercase__ , use_small=lowercase__ )
a_ =_load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
a_ =_bark_load_model(lowercase__ , "cpu" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
a_ =bark_model["model"]
if model.num_parameters(exclude_embeddings=lowercase__ ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
a_ =5
a_ =1_0
if model_type in ["text", "coarse"]:
a_ =torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
a_ =bark_model(lowercase__ )[0]
a_ =model(lowercase__ )
# take last logits
a_ =output_new_model_total.logits[:, [-1], :]
else:
a_ =3
a_ =8
a_ =torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
a_ =model(lowercase__ , lowercase__ )
a_ =bark_model(lowercase__ , lowercase__ )
a_ =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("initial and new outputs are not equal" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
a_ =os.path.join(lowercase__ , lowercase__ )
a_ =BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =BarkFineConfig.from_pretrained(os.path.join(lowercase__ , "config.json" ) )
a_ =EncodecConfig.from_pretrained("facebook/encodec_24khz" )
a_ =BarkSemanticModel.from_pretrained(lowercase__ )
a_ =BarkCoarseModel.from_pretrained(lowercase__ )
a_ =BarkFineModel.from_pretrained(lowercase__ )
a_ =EncodecModel.from_pretrained("facebook/encodec_24khz" )
a_ =BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
a_ =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
a_ =BarkModel(lowercase__ )
a_ =semantic
a_ =coarseAcoustic
a_ =fineAcoustic
a_ =codec
a_ =bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowercase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ =TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
a_ =tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
a_ =model(_lowercase)["""last_hidden_state"""]
a_ =tf.TensorShape((1, 1_0, 7_6_8))
self.assertEqual(output.shape , _lowercase)
# compare the actual values for a slice.
a_ =tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 721
|
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =str(lowercase__ )
return len(base_str ) == 9 and set(base_str ) == set("123456789" )
def UpperCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
a_ =1_0_0_0_0_2 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
a_ =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowercase__ ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
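# Why the two magic multipliers above work, checked standalone: for a
# 4-digit base, base * 100002 is the digit concatenation of base and
# 2 * base; for a 3-digit base, base * 1002003 concatenates base, 2 * base
# and 3 * base. Project Euler 38's worked example uses base 192:
assert int(str(192) + str(2 * 192) + str(3 * 192)) == 192 * 1002003 == 192384576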
| 41
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''PerceiverFeatureExtractor''']
lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase :
'''simple docstring'''
@property
def lowercase_ ( self) -> Any:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps")
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.prepare_init_args_and_inputs_for_common()
a_ =self.block_class(**lowerCAmelCase_)
model.to(lowerCAmelCase_)
model.train()
a_ =model(**lowerCAmelCase_)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_):
a_ =output[0]
a_ =torch.device(lowerCAmelCase_)
a_ =randn_tensor(output.shape , device=lowerCAmelCase_)
a_ =torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
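# Example of how this mixin is meant to be used (a sketch; assumes the
# diffusers block classes are importable under these names):
#
#     from diffusers.models.unet_2d_blocks import DownBlock2D
#
#     class DownBlock2DTests(UpperCAmelCase, unittest.TestCase):
#         block_class = DownBlock2D
#         block_type = "down"
#
# The concrete test case only supplies `block_class`/`block_type`; the shared
# shape, slice and training checks above do the rest.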
| 41
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , **lowerCAmelCase_ , ) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
a_ =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
a_ =tf.keras.layers.ConvaD(
filters=lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , strides=lowerCAmelCase_ , padding="VALID" , groups=lowerCAmelCase_ , use_bias=lowerCAmelCase_ , name="convolution" , )
a_ =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
a_ =ACTaFN[activation] if activation is not None else tf.identity
def lowercase_ ( self , lowerCAmelCase_) -> List[Any]:
"""simple docstring"""
a_ =self.convolution(self.padding(lowerCAmelCase_))
a_ =self.normalization(lowerCAmelCase_)
a_ =self.activation(lowerCAmelCase_)
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =config.num_channels
a_ =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
a_ =shape_list(lowerCAmelCase_)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
a_ =tf.transpose(lowerCAmelCase_ , perm=(0, 2, 3, 1))
a_ =self.embedder(lowerCAmelCase_)
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 2 , **lowerCAmelCase_) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =tf.keras.layers.ConvaD(
filters=lowerCAmelCase_ , kernel_size=1 , strides=lowerCAmelCase_ , use_bias=lowerCAmelCase_ , name="convolution")
a_ =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization")
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False) -> Any:
"""simple docstring"""
return self.normalization(self.convolution(lowerCAmelCase_) , training=lowerCAmelCase_)
class TFRegNetSELayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_) -> Dict:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase_ , name="pooler")
a_ =[
tf.keras.layers.ConvaD(filters=lowerCAmelCase_ , kernel_size=1 , activation="relu" , name="attention.0"),
tf.keras.layers.ConvaD(filters=lowerCAmelCase_ , kernel_size=1 , activation="sigmoid" , name="attention.2"),
]
def lowercase_ ( self , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =self.pooler(lowerCAmelCase_)
for layer_module in self.attention:
a_ =layer_module(lowerCAmelCase_)
a_ =hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , **lowerCAmelCase_) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =in_channels != out_channels or stride != 1
a_ =max(1 , out_channels // config.groups_width)
a_ =(
TFRegNetShortCut(lowerCAmelCase_ , stride=lowerCAmelCase_ , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
a_ =[
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act , name="layer.1"),
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ , name="layer.2"),
]
a_ =ACTaFN[config.hidden_act]
def lowercase_ ( self , lowerCAmelCase_) -> int:
"""simple docstring"""
a_ =hidden_state
for layer_module in self.layers:
a_ =layer_module(lowerCAmelCase_)
a_ =self.shortcut(lowerCAmelCase_)
hidden_state += residual
a_ =self.activation(lowerCAmelCase_)
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , **lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =in_channels != out_channels or stride != 1
a_ =max(1 , out_channels // config.groups_width)
a_ =(
TFRegNetShortCut(lowerCAmelCase_ , stride=lowerCAmelCase_ , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
a_ =[
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act , name="layer.1"),
TFRegNetSELayer(lowerCAmelCase_ , reduced_channels=int(round(in_channels / 4)) , name="layer.2"),
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ , name="layer.3"),
]
a_ =ACTaFN[config.hidden_act]
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
a_ =hidden_state
for layer_module in self.layers:
a_ =layer_module(lowerCAmelCase_)
a_ =self.shortcut(lowerCAmelCase_)
hidden_state += residual
a_ =self.activation(lowerCAmelCase_)
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , **lowerCAmelCase_) -> str:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
a_ =[
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , name="layers.0"),
*[layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , name=f"""layers.{i+1}""") for i in range(depth - 1)],
]
def lowercase_ ( self , lowerCAmelCase_) -> Tuple:
"""simple docstring"""
for layer_module in self.layers:
a_ =layer_module(lowerCAmelCase_)
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ))
a_ =zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCAmelCase_ , config.depths[1:])):
self.stages.append(TFRegNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ , name=f"""stages.{i+1}"""))
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True) -> List[Any]:
"""simple docstring"""
a_ =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ =hidden_states + (hidden_state,)
a_ =stage_module(lowerCAmelCase_)
if output_hidden_states:
a_ =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    '''simple docstring'''
    config_class = RegNetConfig
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
a_ =config
a_ =TFRegNetEmbeddings(lowerCAmelCase_ , name="embedder")
a_ =TFRegNetEncoder(lowerCAmelCase_ , name="encoder")
a_ =tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase_ , name="pooler")
@unpack_inputs
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ) -> Optional[Any]:
"""simple docstring"""
a_ =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ =return_dict if return_dict is not None else self.config.use_return_dict
a_ =self.embedder(lowerCAmelCase_ , training=lowerCAmelCase_)
a_ =self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , training=lowerCAmelCase_)
a_ =encoder_outputs[0]
a_ =self.pooler(lowerCAmelCase_)
# Change to NCHW output format have uniformity in the modules
a_ =tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2))
a_ =tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
a_ =tuple([tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    '''simple docstring'''
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
'''simple docstring'''
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
'''simple docstring'''
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
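# Example usage (a sketch; the checkpoint name comes from the docstring
# constants above, everything else is illustrative):
#
#     from transformers import AutoImageProcessor
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_label = int(tf.math.argmax(logits, axis=-1)[0])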
| 701
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return shortest-path distances from src, relaxing every edge V - 1 times."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input('''Enter number of vertices: ''').strip())
lowercase = int(input('''Enter number of edges: ''').strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase = int(input('''\nEnter shortest path source:''').strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
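# Non-interactive example (added for illustration): a 3-vertex graph with no
# negative cycle, in the same edge-dict format the prompts above build.
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 1},
#         {"src": 2, "dst": 1, "weight": 2},
#     ]
#     bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0)
#     # -> [0.0, 3, 1]   (0 -> 2 -> 1 is cheaper than the direct 0 -> 1 edge)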
| 41
| 0
|
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
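# Example (added for illustration): a 10-cell loop with a car every 2 cells,
# evolved for 3 steps with max speed 5 and a 10% chance of random braking.
#
#     initial = construct_highway(number_of_cells=10, frequency=2, initial_speed=1)
#     history = simulate(initial, number_of_update=3, probability=0.1, max_speed=5)
#     # history[0] is the initial row; history[1:] are the successive updates.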
| 702
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
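# Notes (added): torch.float16 weights require a CUDA device; on CPU, drop the
# `torch_dtype` argument. For reproducible samples, pass a seeded generator:
#
#     generator = torch.Generator("cuda").manual_seed(0)
#     image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5,
#                  generator=generator).images[0]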
| 41
| 0
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
lowercase = """base_with_context"""
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
a_ =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
a_ =weights[F"""layers_{lyr_num}"""]
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
a_ =ly_weight["attention"]
a_ =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
a_ =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
a_ =weights[F"""layers_{lyr_num}"""]
a_ =ly_weight["attention"]
a_ =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a_ =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def UpperCAmelCase_ ( lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
a_ =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ )
a_ =nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
a_ =weights[F"""layers_{lyr_num}"""]
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
a_ =ly_weight["self_attention"]
a_ =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a_ =ly_weight["MultiHeadDotProductAttention_0"]
a_ =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
a_ =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
a_ =nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
a_ =nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
a_ =jnp.tree_util.tree_map(onp.array , snake_case_ )
a_ =[
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
a_ =os.path.join(args.checkpoint_path , ".." , "config.gin" )
a_ =inference.parse_training_gin_file(snake_case_ , snake_case_ )
a_ =inference.InferenceModel(args.checkpoint_path , snake_case_ )
a_ =DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
a_ =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
a_ =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    a_ =T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
a_ =load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , snake_case_ )
a_ =load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , snake_case_ )
a_ =load_decoder(ta_checkpoint["target"]["decoder"] , snake_case_ )
a_ =OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
a_ =SpectrogramDiffusionPipeline(
notes_encoder=snake_case_ , continuous_encoder=snake_case_ , decoder=snake_case_ , scheduler=snake_case_ , melgan=snake_case_ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
lowercase = parser.parse_args()
main(args)
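# Usage note (added): with argparse, `type=bool` turns any non-empty string
# into True, so `--save False` still saves; edit the default instead if you
# want to skip saving. A typical invocation (script and checkpoint paths are
# placeholders):
#
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion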
| 703
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Seasonal ARIMA forecast with the match counts as an exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel support vector regression on the (match, date) features."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Lower outlier limit from the interquartile range (unused by the driver below)."""
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast is 'safe' if it is within 0.1 of the actual value."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data; pass the scalar, not the 1-element list
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
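# Worked example of the vote check (added for illustration):
#
#     data_safety_checker([1.0, 1.05, 0.98], 1.0)
#
# 1.0 and 0.98 are within the 0.1 tolerance (safe); 1.05 exceeds the actual
# value (not safe). Safe wins 2-1, so the function returns True -> "safe".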
| 704
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(lowerCAmelCase_):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ =do_clean_text
a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
a_ =SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def lowercase_ ( self) -> int:
"""simple docstring"""
return len(self.raw_vocab)
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))
def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)
def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
"""simple docstring"""
a_ ="".join(lowerCAmelCase_).strip()
return out_string
def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
"""simple docstring"""
a_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
if len(lowerCAmelCase_) > self.model_max_length:
a_ =input_ids[-self.model_max_length :]
return input_ids
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
"""simple docstring"""
a_ =0
if os.path.isdir(lowerCAmelCase_):
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ =os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ =(
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ =token_index
writer.write(",".join(lowerCAmelCase_) + "\n")
index += 1
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , lowerCAmelCase_)
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
'''simple docstring'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
"""simple docstring"""
a_ =vocab # same as swe
a_ =ids_to_tokens # same as bpe
a_ =emoji
a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ =re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ =re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.ids_to_tokens)
def lowercase_ ( self , lowerCAmelCase_) -> Any:
"""simple docstring"""
a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
a_ =content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
"""simple docstring"""
a_ =text.replace(" " , "<SP>")
a_ =text.replace(" " , "<SP>")
a_ =text.replace("\r\n" , "<BR>")
a_ =text.replace("\n" , "<BR>")
a_ =text.replace("\r" , "<BR>")
a_ =text.replace("\t" , "<TAB>")
a_ =text.replace("—" , "ー")
a_ =text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
if clean:
a_ =self.clean_text(lowerCAmelCase_)
def check_simbol(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
a_ =(int(e[0]) << 8) + int(e[1])
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(lowerCAmelCase_):
a_ =x.encode()
if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
a_ =0
a_ =[]
while pos < len(lowerCAmelCase_):
a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ =[] # (token_id, token, pos)
for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
a_ =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase_) > 2:
a_ =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase_) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
result.append(lowerCAmelCase_)
a_ =e
else:
a_ =pos + 1
a_ =text[pos:end]
if check_simbol(lowerCAmelCase_):
result.append("<KIGOU>")
elif checkuae(lowerCAmelCase_):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ =end
return result
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
"""simple docstring"""
a_ =[]
a_ =[]
a_ =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(lowerCAmelCase_)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(lowerCAmelCase_)
if len(lowerCAmelCase_) > 0:
words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
a_ ="".join(lowerCAmelCase_)
return text
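# Example usage (a sketch; the checkpoint name comes from the pretrained map
# defined above):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     text = tokenizer.decode(ids)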
| 41
| 0
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, masking out padding positions.
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
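# Example usage (a sketch; pairing with the XLM-R tokenizer is an assumption
# based on the XLM-RoBERTa backbone above):
#
#     from transformers import AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("xlm-roberta-large")
#     batch = tok(["a photo of a cat"], return_tensors="pt", padding=True)
#     projected, token_embs = model(batch["input_ids"], batch["attention_mask"])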
| 705
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
lowercase = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowercase = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =EfficientNetConfig()
a_ =CONFIG_MAP[model_name]["hidden_dim"]
a_ =CONFIG_MAP[model_name]["width_coef"]
a_ =CONFIG_MAP[model_name]["depth_coef"]
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =CONFIG_MAP[model_name]["dropout_rate"]
a_ =CONFIG_MAP[model_name]["dw_padding"]
a_ ="huggingface/label-files"
a_ ="imagenet-1k-id2label.json"
a_ =1_0_0_0
a_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
a_ ={int(lowercase__ ): v for k, v in idalabel.items()}
a_ =idalabel
a_ ={v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase_ ( ):
'''simple docstring'''
a_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
a_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def UpperCAmelCase_ ( lowercase__ ):
'''simple docstring'''
a_ =[v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
a_ =sorted(set(lowercase__ ) )
a_ =len(lowercase__ )
a_ ={b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
a_ =[]
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
a_ =block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
a_ ={}
for item in rename_keys:
if item[0] in original_param_names:
a_ ="efficientnet." + item[1]
a_ ="classifier.weight"
a_ ="classifier.bias"
return key_mapping
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
a_ =key_mapping[key]
if "_conv" in key and "kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
a_ =torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
a_ =torch.from_numpy(np.transpose(lowercase__ ) )
else:
a_ =torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
a_ =model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_0_0_0 , classifier_activation="softmax" , )
a_ =original_model.trainable_variables
a_ =original_model.non_trainable_variables
a_ ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
a_ =param.numpy()
a_ =list(tf_params.keys() )
# Load HuggingFace model
a_ =get_efficientnet_config(lowercase__ )
a_ =EfficientNetForImageClassification(lowercase__ ).eval()
a_ =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
a_ =rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
a_ =convert_image_processor(lowercase__ )
a_ =preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
a_ =hf_model(**lowercase__ )
a_ =outputs.logits.detach().numpy()
# Original model inference
a_ =False
a_ =CONFIG_MAP[model_name]["image_size"]
a_ =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
a_ =image.img_to_array(lowercase__ )
a_ =np.expand_dims(lowercase__ , axis=0 )
a_ =original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
a_ =F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowercase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
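# Example invocation (added; the script filename is assumed, the folder is a
# placeholder):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model
#
# `--save_model` and `--push_to_hub` are store_true flags, so passing them at
# all enables the behaviour.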
| 41
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase = logging.get_logger(__name__)
lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=3_0_0 , lowerCAmelCase_=1_0_2_4 , lowerCAmelCase_=6 , lowerCAmelCase_=1_0_2_4 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=1_0_2_4 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_5_6 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=1.0 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_="resnet50" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_=False , lowerCAmelCase_=3_0_0 , lowerCAmelCase_=False , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.2_5 , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Optional[int]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict(self) -> dict:
        """Serializes this instance to a Python dictionary, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
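# Illustrative usage (a sketch of the attribute_map aliases defined above):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == config.encoder_attention_heads
#   assert config.hidden_size == config.d_model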
| 706
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41
| 0
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
    def setUp(self) -> None:
        """simple docstring"""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
    def test_tpu(self) -> None:
        """simple docstring"""
        distributed_args = f"""\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy())
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
| 0
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ =0
@slow
def lowercase_ ( self) -> Any:
"""simple docstring"""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer) , 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer , (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer) , 0)
    def test_tokenizer_from_pretrained_identifier(self) -> None:
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size , 1_2)
    def test_tokenizer_from_model_type(self) -> None:
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size , 2_0)
    def test_tokenizer_from_tokenizer_class(self) -> None:
        """simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config , RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config)
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size , 1_2)
    def test_tokenizer_from_type(self) -> None:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert" , use_fast=False)
            self.assertIsInstance(tokenizer , BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2" , use_fast=False)
            self.assertIsInstance(tokenizer , GPTaTokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self) -> None:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(tmp_dir , "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="bert")
            self.assertIsInstance(tokenizer , BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(tmp_dir , "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(tmp_dir , "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer , GPTaTokenizerFast)
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(lowercase_):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx")
@require_tokenizers
def lowercase_ ( self) -> str:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
a_ =tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast))
if isinstance(lowercase_ , lowercase_):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_)
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
a_ =tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_)
@require_tokenizers
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=lowercase_) , lowercase_)
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased") , lowercase_)
@require_tokenizers
def lowercase_ ( self) -> Any:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=lowercase_)
a_ ="""Hello, world. How are you?"""
a_ =tokenizer.tokenize(lowercase_)
self.assertEqual("[UNK]" , tokens[0])
a_ =AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=lowercase_)
a_ =tokenizer.tokenize(lowercase_)
self.assertEqual("[UNK]" , tokens[0])
@require_tokenizers
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
self.assertEqual(type(lowercase_) , lowercase_)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , "[UNK]")
self.assertEqual(tokenizer.padding_side , "right")
self.assertEqual(tokenizer.truncation_side , "right")
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
            self.assertIsInstance(tokenizer2 , tokenizer.__class__)
            self.assertEqual(tokenizer2.vocab_size , 1_2)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("ctrl")
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_)
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
a_ =get_tokenizer_config("bert-base-cased")
a_ =config.pop("_commit_hash" , lowercase_)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {"do_lower_case": False})
# This model does not have a tokenizer_config so we get back an empty dict.
a_ =get_tokenizer_config(lowercase_)
self.assertDictEqual(lowercase_ , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer")
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
try:
AutoConfig.register("custom" , lowercase_)
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
a_ =CustomTokenizer.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_)
a_ =AutoTokenizer.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self) -> str:
"""simple docstring"""
try:
AutoConfig.register("custom" , lowercase_)
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
a_ =BertTokenizerFast.from_pretrained(lowercase_)
bert_tokenizer.save_pretrained(lowercase_)
a_ =CustomTokenizerFast.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_)
a_ =AutoTokenizer.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
a_ =AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(lowercase_):
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_):
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_)
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_)
a_ =AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_ , use_fast=lowercase_)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_)
a_ =AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
@require_tokenizers
def lowercase_ ( self) -> str:
"""simple docstring"""
class UpperCAmelCase ( _UpperCAmelCase):
'''simple docstring'''
__magic_name__ : List[str] = False
class UpperCAmelCase ( _UpperCAmelCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = NewTokenizer
__magic_name__ : Tuple = False
try:
AutoConfig.register("custom" , lowercase_)
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_)
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_)
# If remote code is not set, the default is to use local
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=lowercase_)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_ , use_fast=lowercase_)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertTrue(tokenizer.special_attribute_present)
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=lowercase_ , use_fast=lowercase_)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self) -> int:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=lowercase_)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
a_ =AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=lowercase_ , use_fast=lowercase_)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
def lowercase_ ( self) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
lowercase_ , "bert-base is not a local folder and is not a valid model identifier"):
a_ =AutoTokenizer.from_pretrained("bert-base")
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowercase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
a_ =AutoTokenizer.from_pretrained(lowercase_ , revision="aaaaaa")
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
a_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator():
    '''simple docstring'''
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n = 1_0_0_0):
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
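# Worked example (a sketch): solution(3) asks for the index of the first Fibonacci
# term with 3 digits. The generator yields 1, 2, 3, 5, ..., 89, 144; 144 is the
# 12th term, so solution(3) == 12 (Project Euler problem 25 uses n=1000).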
| 41
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
@slow
def lowercase_ ( self) -> Dict:
"""simple docstring"""
a_ =TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
a_ ={
"input_ids": tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa),
}
a_ =model(lowerCAmelCase__)["last_hidden_state"]
a_ =tf.TensorShape((1, 6, 7_6_8))
self.assertEqual(output.shape , lowerCAmelCase__)
# compare the actual values for a slice.
a_ =tf.convert_to_tensor(
[
[
[0.0_6_8_1_7_6_2, 0.1_0_8_9_4_4_5_1, 0.0_6_7_7_2_5_0_4],
[-0.0_6_4_2_3_6_6_8, 0.0_2_3_6_6_6_1_5, 0.0_4_3_2_9_3_4_4],
[-0.0_6_0_5_7_2_9_5, 0.0_9_9_7_4_1_3_5, -0.0_0_0_7_0_5_8_4],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 709
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=3_2_1_2_8 , d_model=7_6_8 , d_kv=6_4 , d_ff=2_0_4_8 , expert_capacity=6_4 , num_layers=1_2 , num_sparse_encoder_layers=3 , num_decoder_layers=1_2 , num_sparse_decoder_layers=3 , num_heads=1_2 , num_experts=8 , router_bias=False , router_jitter_noise=0.0_1 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=3_2 , relative_attention_max_distance=1_2_8 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.0_0_1 , router_aux_loss_coef=0.0_0_1 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> None:
"""simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
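# Illustrative usage (a sketch): with the defaults above (12 layers, 3 sparse
# layers), every 4th layer becomes a sparse MoE layer:
#   config = SwitchTransformersConfig()
#   assert config.encoder_sparse_step == 4 and config.num_experts == 8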
| 41
| 0
|
'''simple docstring'''
def valid_coloring(neighbours , colored_vertices , color ):
    '''simple docstring'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph , max_colors , colored_vertices , index ):
    '''simple docstring'''
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph , max_colors ):
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
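# Illustrative usage (a sketch): adjacency-matrix graph, 3 colors.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # prints [0, 1, 0, 1, 0]; [] means uncolorable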
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def get_results(output_dir ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase ( __a):
'''simple docstring'''
def lowercase_ ( self) -> List[Any]:
"""simple docstring"""
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
a_ =time()
xla_spawn.main()
a_ =time()
a_ =get_results(lowerCAmelCase_)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
def lowercase_ ( self) -> Tuple:
"""simple docstring"""
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq , size ):
    '''simple docstring'''
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input(dirty ):
    '''simple docstring'''
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table(key ):
    '''simple docstring'''
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode(plaintext , key ):
    '''simple docstring'''
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext , 2 ):
        row1, col1 = divmod(table.index(char1 ) , 5 )
        row2, col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext , key ):
    '''simple docstring'''
    table = generate_table(key )
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext , 2 ):
        row1, col1 = divmod(table.index(char1 ) , 5 )
        row2, col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
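# Illustrative round trip (a sketch): "Hello World" is normalized by prepare_input
# to "HELXLOWORLDX" (double letters split, odd length padded), so decoding returns
# the padded form rather than the raw input.
if __name__ == "__main__":
    secret = encode("Hello World", "playfair example")
    assert decode(secret, "playfair example") == "HELXLOWORLDX"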
| 711
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
'''simple docstring'''
__magic_name__ : int = "albert"
    def __init__( self , vocab_size=3_0_0_0_0 , embedding_size=1_2_8 , hidden_size=4_0_9_6 , num_hidden_layers=1_2 , num_hidden_groups=1 , num_attention_heads=6_4 , intermediate_size=1_6_3_8_4 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
'''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
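# Illustrative usage (a sketch; the constructor signature is assumed to follow the
# base OnnxConfig API):
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
#   assert list(onnx_config.inputs) == ["input_ids", "attention_mask", "token_type_ids"]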
| 41
| 0
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowercase = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows(test_case ):
    '''simple docstring'''
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"" )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names():
    '''simple docstring'''
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
'''simple docstring'''
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self , metric_name) -> None:
        """simple docstring"""
        _ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
@slow
    def test_load_real_metric(self , metric_name) -> None:
        """simple docstring"""
        _ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics" , metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True)
        self.assertEqual(results.failed , 0)
        self.assertGreater(results.attempted , 1)
@contextmanager
    def patch_intensive_calls(self , metric_name , module_name):
        """simple docstring"""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics(self):
        """simple docstring"""
        def load_local_metric(metric_name , *args , **kwargs):
            return load_metric(os.path.join("metrics" , metric_name) , *args , **kwargs)
        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher(cls , metric_name):
        """simple docstring"""
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def patch_bleurt(module_name ):
    '''simple docstring'''
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string("sv" , "" , "" )  # handle pytest cli flags
    class MockedPredictor(Predictor):
        '''simple docstring'''
        def predict(self , input_dict):
            """simple docstring"""
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.0_3, 1.0_4])
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def patch_bertscore(module_name ):
'''simple docstring'''
import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def patch_comet(module_name ):
    '''simple docstring'''
    def load_from_checkpoint(model_path ):
        class Model:
            '''simple docstring'''
            def predict(self , data , *args , **kwargs):
                """simple docstring"""
                assert len(data) == 2
                scores = [0.1_9, 0.9_2]
                return scores, sum(scores) / len(scores)
        return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download a comet model
    with patch("comet.download_model" ) as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    '''simple docstring'''
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = '''ERROR'''
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 712
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums = None ):
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase = int(input('''Enter number of elements : ''').strip())
lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 41
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args ):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '''\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'''
class ConvertCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser ) -> None:
"""simple docstring"""
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> None:
"""simple docstring"""
a_ =logging.get_logger("transformers-cli/converting")
self._logger.info(f"""Loading model {model_type}""")
a_ =model_type
a_ =tf_checkpoint
a_ =pytorch_dump_output
a_ =config
a_ =finetuning_task_name
    def run(self) -> None:
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
a_ =self._tf_checkpoint
a_ =""
else:
a_ =self._tf_checkpoint
a_ =""
convert_transfo_xl_checkpoint_to_pytorch(
_SCREAMING_SNAKE_CASE , self._config , self._pytorch_dump_output , _SCREAMING_SNAKE_CASE)
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
| 713
|
'''simple docstring'''
import os
from math import log10
def solution(data_file = "base_exp.txt" ):
    '''simple docstring'''
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
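# Why log10 works (a sketch): evaluating b**e directly would mean million-digit
# integers, but log10(b**e) = e * log10(b) is monotonic, so comparing e * log10(b)
# ranks the lines identically. E.g. 11 * log10(2) ~ 3.311 < 7 * log10(3) ~ 3.340,
# matching 2**11 = 2048 < 3**7 = 2187.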
| 41
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
        prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 714
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a , b ):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    x, y = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    x, y = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a , n ):
    '''simple docstring'''
    b, x = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    x, y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
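# Worked example (a sketch): find x with x % 5 == 1 and x % 7 == 3.
# Both chinese_remainder_theorem(5, 1, 7, 3) and chinese_remainder_theorem2(5, 1, 7, 3)
# return 31, and indeed 31 % 5 == 1 and 31 % 7 == 3.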
| 41
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname)[0] , "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase):
'''simple docstring'''
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ , a_ =FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__lowercase)
a_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ =jax.random.PRNGKey(0)
a_ =4
a_ =jax.device_count()
a_ =num_samples * [prompt]
a_ =pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
a_ =replicate(__lowercase)
a_ =jax.random.split(__lowercase , __lowercase)
a_ =shard(__lowercase)
a_ =pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_5_1_4_7_4_5) < 1e-3
assert np.abs(np.abs(__lowercase , dtype=np.floataa).sum() - 4_9_9_4_7.8_7_5) < 5e-1
a_ =pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(__lowercase) == num_samples
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__lowercase)
a_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ =jax.random.PRNGKey(0)
a_ =5_0
a_ =jax.device_count()
a_ =num_samples * [prompt]
a_ =pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
a_ =replicate(__lowercase)
a_ =jax.random.split(__lowercase , __lowercase)
a_ =shard(__lowercase)
a_ =pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_5_6_5_2_4_0_1)) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_3_8_3_8_0_8.2)) < 5e-1
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ , a_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__lowercase)
a_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ =jax.random.PRNGKey(0)
a_ =5_0
a_ =jax.device_count()
a_ =num_samples * [prompt]
a_ =pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
a_ =replicate(__lowercase)
a_ =jax.random.split(__lowercase , __lowercase)
a_ =shard(__lowercase)
a_ =pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_4_0_0_3_9_0_6)) < 1e-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_3_7_3_5_1_6.7_5)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
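
# Sharding pattern used throughout these tests (illustrative note, with assumed
# toy shapes): `replicate` copies the param pytree onto every device, `shard`
# adds a leading device axis to the inputs, and one PRNG key is split out per
# device, so the jit/pmap-compiled pipeline renders one image per device:
#
#     params     -> replicate(params)               # same pytree on all devices
#     prompt_ids -> shard(prompt_ids)               # e.g. (8, 1, 77) on 8 devices
#     prng_seed  -> jax.random.split(prng_seed, 8)  # one key per device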
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
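
# Illustrative extra check (not part of the original module): for a Hermitian
# matrix, the Rayleigh quotient of an eigenvector equals its eigenvalue.
def rayleigh_of_eigenvector_demo() -> None:
    a = np.array([[2, 0], [0, 5]])
    eigenvalues, eigenvectors = np.linalg.eigh(a)
    v = eigenvectors[:, [0]]  # column eigenvector for the smallest eigenvalue
    assert np.isclose(rayleigh_quotient(a, v), eigenvalues[0])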
| 41
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
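
# Note on the `num_attention_heads / 2` expectations above (explanatory comment,
# not in the original file): ConvBERT replaces part of self-attention with
# span-based dynamic convolution, controlled by `head_ratio` (2 in this tester),
# so only half of the configured heads surface in the returned attention tensors.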
| 716
|
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column] safely."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every complete solution."""
    if row >= len(board):
        # Store a copy: appending `board` itself would alias the live board,
        # which keeps mutating as the backtracking continues.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty cells."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
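
# Sanity check (illustrative addition, not in the original script): the 8-queens
# puzzle is known to have exactly 92 distinct solutions.
if n == 8:
    assert len(solution) == 92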
| 41
| 0
|
'''simple docstring'''
def method_1(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] with the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... strictly below b - h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
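
# Quick accuracy sketch (illustrative addition): for f(x) = x * x on [0, 1] the
# exact integral is 1/3. Note that make_points' strict `x < (b - h)` bound skips
# the interior point at b - h, so the estimate carries an O(h) bias on top of
# the usual O(h^2) trapezoidal error.
def convergence_demo():
    exact = 1.0 / 3.0
    for steps in (10.0, 100.0, 1000.0):
        estimate = method_1([0.0, 1.0], steps)
        print(f"steps={steps:>6}: estimate={estimate:.6f}, error={abs(estimate - exact):.2e}")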
| 717
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
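
# Shape of the result (illustrative usage; the exact tokens and scores depend on
# the camembert-base checkpoint): a list of `topk` tuples of
# (filled_sentence, probability, predicted_token).
for filled, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
    print(f"{prob:.3f}\t{token}\t{filled}")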
| 41
| 0
|
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the first state of the highway: one row of cells, -1 meaning empty."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between the car at car_index and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Compute the next speeds of all cars (positions are updated in simulate)."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the update rule repeatedly, appending each new highway state."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
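
# Minimal usage sketch (parameter choices here are illustrative only): build a
# 25-cell highway with a car every 4 cells starting at speed 2, run five
# Nagel-Schreckenberg updates with a 10% random-slowdown probability, and print
# each step with '.' for empty cells.
def traffic_demo() -> None:
    highway = construct_highway(number_of_cells=25, frequency=4, initial_speed=2)
    history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
    for step, row in enumerate(history):
        print(step, "".join("." if cell == -1 else str(cell) for cell in row))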
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
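
# What the lazy pattern above buys us (explanatory note, not in the original
# file): at runtime the module replaces itself with a _LazyModule, so e.g.
# `from transformers import RagConfig` only materializes configuration_rag on
# first attribute access instead of importing torch and TensorFlow together,
# while the TYPE_CHECKING branch keeps static type checkers fully informed.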
| 41
| 0
|