code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import baseaa
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8" ) )
def UpperCAmelCase__ ( UpperCAmelCase__ :bytes ):
'''simple docstring'''
return baseaa.baadecode(UpperCAmelCase__ ).decode("utf-8" )
if __name__ == "__main__":
A_ : List[str] = '''Hello World!'''
A_ : List[str] = baseaa_encode(test)
print(encoded)
A_ : str = baseaa_decode(encoded)
print(decoded)
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (('''num_inference_steps''', 25),)
def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
a = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__lowerCAmelCase )
return config
def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
a = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
a = dummy_past_residuals[: new_scheduler.config.solver_order]
a , a = sample, sample
for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
a = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
a = dummy_past_residuals[: new_scheduler.config.solver_order]
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
if scheduler is None:
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
a = self.scheduler_classes[0]
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
a = model(__lowerCAmelCase , __lowerCAmelCase )
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A ( self : Any ) -> int:
"""simple docstring"""
a = dict(self.forward_default_kwargs )
a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config()
a = scheduler_class(**__lowerCAmelCase )
a = self.dummy_sample
a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ):
a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
a = dummy_past_residuals[: scheduler.config.solver_order]
a = scheduler.timesteps[5]
a = scheduler.timesteps[6]
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : List[str] ) -> Dict:
"""simple docstring"""
a = UniPCMultistepScheduler(**self.get_scheduler_config() )
a = self.full_loop(scheduler=__lowerCAmelCase )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
a = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a = DEISMultistepScheduler.from_config(scheduler.config )
a = DPMSolverMultistepScheduler.from_config(scheduler.config )
a = UniPCMultistepScheduler.from_config(scheduler.config )
a = self.full_loop(scheduler=__lowerCAmelCase )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def A ( self : List[Any] ) -> Dict:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
a = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def A ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.check_over_configs(lower_order_final=__lowerCAmelCase )
self.check_over_configs(lower_order_final=__lowerCAmelCase )
def A ( self : Dict ) -> str:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
def A ( self : Dict ) -> int:
"""simple docstring"""
a = self.full_loop()
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def A ( self : Optional[int] ) -> int:
"""simple docstring"""
a = self.full_loop(prediction_type="v_prediction" )
a = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def A ( self : Union[str, Any] ) -> str:
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
a = scheduler_class(**__lowerCAmelCase )
a = 10
a = self.dummy_model()
a = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
a = model(__lowerCAmelCase , __lowerCAmelCase )
a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
a = self.get_scheduler_config(**__lowerCAmelCase )
a = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 32
| 1
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _lowercase ( datasets.BeamBasedBuilder ):
def A ( self : Dict ) -> List[str]:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=__lowerCAmelCase , )
def A ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ) -> str:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def A ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ) -> Any:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
class _lowercase ( datasets.BeamBasedBuilder ):
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=__lowerCAmelCase , )
def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def UpperCAmelCase__ ( ):
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class _lowercase ( UpperCAmelCase__ ):
@require_beam
def A ( self : List[Any] ) -> Tuple:
"""simple docstring"""
a = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A ( self : Optional[int] ) -> str:
"""simple docstring"""
import apache_beam as beam
a = beam.io.parquetio.WriteToParquet
a = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a = DummyBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
a = partial(__lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCAmelCase , builder.name , "default" , "0.0.0" , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__lowerCAmelCase , builder.name , "default" , "0.0.0" , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
a = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a = DummyBeamDataset(cache_dir=__lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def A ( self : Tuple ) -> Dict:
"""simple docstring"""
a = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
a = NestedBeamDataset(cache_dir=__lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
a = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , __lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , __lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 32
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]:
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = num_channels
a = num_stages
a = hidden_sizes
a = depths
a = is_training
a = use_labels
a = intermediate_size
a = hidden_act
a = num_labels
a = initializer_range
a = out_features
a = out_indices
a = scope
def A ( self : Optional[Any] ) -> int:
"""simple docstring"""
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
a = ConvNextVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
a = ConvNextVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
a = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
a = None
a = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
def A ( self : Dict ) -> Optional[int]:
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def A ( self : List[str] ) -> List[Any]:
"""simple docstring"""
a = ConvNextVaModelTester(self )
a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def A ( self : Tuple ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def A ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def A ( self : List[str] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = True
if model_class.__name__ in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]:
continue
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
a = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = False
a = True
if (
model_class.__name__
in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
a = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self : List[Any] ) -> Any:
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self : Dict ) -> Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self : Tuple ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A ( self : Tuple ) -> List[str]:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def A ( self : Optional[int] ) -> str:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def A ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )
a = self.default_image_processor
a = prepare_img()
a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCAmelCase )
# verify the logits
a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
| 1
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A_ : Any = get_logger(__name__)
class _lowercase ( enum.Enum ):
_UpperCAmelCase = '''all_checks'''
_UpperCAmelCase = '''basic_checks'''
_UpperCAmelCase = '''no_checks'''
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[dict] , UpperCAmelCase__ :dict , UpperCAmelCase__ :Union[str, Any]=None ):
'''simple docstring'''
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) )
if len(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) )
a = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
a = " for " + verification_name if verification_name is not None else ""
if len(UpperCAmelCase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn't match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
class _lowercase ( UpperCAmelCase__ ):
pass
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[dict] , UpperCAmelCase__ :dict ):
'''simple docstring'''
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) )
if len(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) > 0:
raise UnexpectedSplits(str(set(UpperCAmelCase__ ) - set(UpperCAmelCase__ ) ) )
a = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCAmelCase__ ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCAmelCase__ ) )
logger.info("All the splits matched successfully." )
def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :bool = True ):
'''simple docstring'''
if record_checksum:
a = shaaaa()
with open(UpperCAmelCase__ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
m.update(UpperCAmelCase__ )
a = m.hexdigest()
else:
a = None
return {"num_bytes": os.path.getsize(UpperCAmelCase__ ), "checksum": checksum}
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
a = ""
a = ""
a = []
a = 0
a = 256
a = 0
a = 0
a = 0
a = 0
def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int:
"""simple docstring"""
a = cva.imread(__lowerCAmelCase , 0 )
a = copy.deepcopy(self.img )
a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
a = np.sum(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
a = x[i] / self.k
self.sk += prk
a = (self.L - 1) * self.sk
if self.rem != 0:
a = int(last % last )
a = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__lowerCAmelCase )
a = int(np.ma.count(self.img ) / self.img[1].size )
a = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
a = self.img[j][i]
if num != self.last_list[num]:
a = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def A ( self : Any ) -> int:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def A ( self : Any ) -> int:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
A_ : int = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 32
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCAmelCase__ ( ):
'''simple docstring'''
a , a = 9, 14 # noqa: F841
a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a = defaultdict(UpperCAmelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
a = mst(UpperCAmelCase__ )
a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
a = tuple(answer[:2] )
a = tuple(edge[::-1] )
assert edge in result or reverse in result
| 32
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE sampler.

    Parameters:
        unet: model that predicts the score of a noisy sample.
        scheduler: ``ScoreSdeVeScheduler`` driving the predictor/corrector loop.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images; returns ``ImagePipelineOutput`` or a tuple."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step for the returned image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 32
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs and random inputs for the tests below.

    The test suite at the bottom of this file instantiates this class as
    ``OpenAIGPTModelTester(self)`` and calls the ``prepare_*``/``create_and_check_*``
    methods by these names, so the names are part of the contract.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # GPT has no native pad token; reuse the last vocab id.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, head_mask, token_type_ids, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model / generation / pipeline tests for the OpenAI-GPT family.

    Method names are restored to ``test_*`` so unittest discovery actually runs
    them; the mixin hooks (``all_model_classes`` etc.) are the attribute names
    the mixins read.
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # The double-heads model needs multiple-choice shaped inputs/labels.
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        """Greedy generation from a pinned prompt must reproduce the reference ids."""
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        # Greedy decoding (do_sample=False) so the output is deterministic.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 32
|
# Doomsday "anchor day" of each month (index 0 = January) for leap years...
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# ...and for common (non-leap) years; the two differ only in January/February.
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Week-day index -> name, 0 = Sunday (the Doomsday algorithm's convention).
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def UpperCAmelCase__(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date via Conway's Doomsday algorithm.

    >>> UpperCAmelCase__(2020, 10, 24)
    'Saturday'
    >>> UpperCAmelCase__(2000, 1, 1)
    'Saturday'
    >>> UpperCAmelCase__(1900, 1, 1)
    'Monday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Anchor tables defined locally so the function is self-contained (the
    # module-level copies were lost to repeated rebindings of one name).
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT a leap year when it isn't divisible by 4, or when it is a
    # century year (centurian == 0) NOT divisible by 400.  The original test
    # used `% 400 == 0`, misclassifying e.g. 1900 and 2000.
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run the docstring examples as a self-test.
    from doctest import testmod

    testmod()
| 32
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding: map submodule name -> public symbols it provides.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Field names match the attribute reads in ``main`` below
    (``model_args.model_name_or_path``, ``model_args.task_type``, ...).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names match the attribute reads in ``main`` below
    (``data_args.data_dir``, ``data_args.labels``, ...).
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tune / evaluate / predict with a token-classification model (CoNLL-style).

    Renamed to ``main`` to match the callers at the bottom of this file.
    Returns the dict of evaluation results (empty unless --do_eval).
    """
    # See all possible arguments by passing --help, or via the dataclasses above.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome."
        )

    # Resolve the task class (NER, POS, ...) from the local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}"""
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Map logits back to label strings, skipping positions ignored by the loss.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    """Entry point for torch_xla's `xla_spawn.py` launcher; the positional
    argument is the spawned process index and is intentionally unused."""
    # NOTE(review): `main` is not defined under that name in this module — the
    # training entry point above appears to be it under a mangled identifier;
    # confirm the intended target before running.
    main()


if __name__ == "__main__":
    main()
| 32
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _lowercase(PretrainedConfig):
    """Configuration for LiLT (Language-independent Layout Transformer).

    Parameter names are recovered from the attribute assignments below; the
    original signature reused one mangled name for every parameter, which is a
    SyntaxError in Python.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        # presumably the upstream `max_2d_position_embeddings` — TODO confirm
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : List[Any] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for RWKV models.

    ``attention_hidden_size`` defaults to ``hidden_size`` and
    ``intermediate_size`` to ``4 * hidden_size`` when left as ``None``.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 32
| 1
|
import logging
from transformers import PretrainedConfig
A_ : Tuple = logging.getLogger(__name__)
A_ : Dict = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _lowercase(PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    ``enc_*`` parameters configure the encoder stack, ``dec_*`` the decoder.
    Parameter names are recovered from the attribute assignments; the original
    signature reused one mangled name for every parameter (a SyntaxError).
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase(SequenceFeatureExtractor):
    """Feature extractor turning raw mono audio into padded log-mel patches.

    Produces ``audio_values`` (padded log-mel spectrograms) and, optionally,
    ``audio_mask`` (1 for real patches, 0 for padding).
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per time step
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, rescaled into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 32
| 1
|
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
    """Test helper that builds BertGeneration configs plus dummy inputs and runs
    shape checks for the encoder/decoder models exercised below.

    NOTE(review): identifiers in this block appear machine-mangled — every local
    is named ``a`` and every parameter ``__lowerCAmelCase`` (duplicate parameter
    names are a SyntaxError), and several bodies read names that are never bound
    (e.g. ``parent`` in ``__init__``). Code left byte-identical; comments only.
    """

    def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Dict=99 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=50 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=None , ) -> Any:
        """Record the hyper-parameters used to size the dummy model and inputs."""
        a = parent
        a = batch_size
        a = seq_length
        a = is_training
        a = use_input_mask
        a = vocab_size
        a = hidden_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = initializer_range
        a = use_labels
        a = scope

    def A ( self : int ) -> str:
        """Build dummy input_ids, an optional attention mask, labels and a config."""
        a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a = None
        if self.use_input_mask:
            a = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a = self.get_config()
        return config, input_ids, input_mask, token_labels

    def A ( self : Union[str, Any] ) -> Dict:
        """Return a small BertGenerationConfig built from the stored sizes."""
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )

    def A ( self : Dict ) -> Tuple:
        """Extend prepare_config_and_inputs with encoder states for decoder tests."""
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) = self.prepare_config_and_inputs()
        a = True
        a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def A ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , **__lowerCAmelCase : Union[str, Any] , ) -> List[Any]:
        """Instantiate the encoder and check the last hidden state shape."""
        a = BertGenerationEncoder(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
        a = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Tuple , ) -> Dict:
        """Run the encoder as a decoder with and without an encoder attention mask."""
        a = True
        a = BertGenerationEncoder(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , **__lowerCAmelCase : str , ) -> Any:
        """Check past_key_values caching: cached vs. uncached forward must agree."""
        a = True
        a = True
        a = BertGenerationDecoder(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
        # first forward pass
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , )
        a = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        a = ids_tensor((self.batch_size, 3) , config.vocab_size )
        a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        a = torch.cat([input_ids, next_tokens] , dim=-1 )
        a = torch.cat([input_mask, next_mask] , dim=-1 )
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["hidden_states"][0]
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["hidden_states"][0]
        # select random slice
        a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        a = output_from_no_past[:, -3:, random_slice_idx].detach()
        a = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )

    def A ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , *__lowerCAmelCase : Union[str, Any] , ) -> Optional[Any]:
        """Check the causal-LM head: logits must be (batch, seq, vocab)."""
        a = BertGenerationDecoder(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """Return (config, inputs_dict) in the shape the common tests expect."""
        a , a , a , a = self.prepare_config_and_inputs()
        a = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common ModelTester/Generation/Pipeline test-suite wiring for BertGeneration.

    NOTE(review): the three class attributes below all share the mangled name
    ``_UpperCAmelCase`` (only the last assignment survives) and the mixin bases
    are the unresolved name ``UpperCAmelCase__``. Code left as-is; comments only.
    """

    _UpperCAmelCase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    _UpperCAmelCase = (BertGenerationDecoder,) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def A ( self : Any ) -> Dict:
        """Set up the model tester and a ConfigTester."""
        a = BertGenerationEncoderTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )

    def A ( self : Optional[Any] ) -> Dict:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def A ( self : Optional[Any] ) -> Tuple:
        """Encoder forward-shape check."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Same check with the model type overridden to "bert"."""
        a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = "bert"
        self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def A ( self : Optional[Any] ) -> List[str]:
        """Run the encoder in decoder (cross-attention) mode."""
        a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )

    def A ( self : Any ) -> Tuple:
        """Check past_key_values caching for large decoder inputs."""
        a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowerCAmelCase )

    def A ( self : str ) -> int:
        """Decoder-mode check with encoder hidden states deliberately set to None."""
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        a = None
        self.model_tester.create_and_check_model_as_decoder(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )

    def A ( self : Optional[int] ) -> Optional[int]:
        """Causal-LM head logits-shape check."""
        a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )

    @slow
    def A ( self : Optional[int] ) -> str:
        """Smoke-test loading the pretrained BBC encoder checkpoint."""
        a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class _lowercase ( unittest.TestCase ):
    """Slow integration test: pretrained BertGeneration encoder output slices must
    match hard-coded reference values.

    NOTE(review): locals are mangled to ``a``, so later lines reference names
    (``model``, ``output``) that are never bound. Code left as-is.
    """

    @slow
    def A ( self : Optional[int] ) -> Optional[int]:
        """Run one tokenized sentence through the encoder and compare a 3x3 slice."""
        a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            a = model(__lowerCAmelCase )[0]
        a = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , __lowerCAmelCase )
        a = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
    """Slow integration test: pretrained BertGeneration decoder logits slices must
    match hard-coded reference values.

    NOTE(review): locals are mangled to ``a``, so later lines reference names
    (``model``, ``output``) that are never bound. Code left as-is.
    """

    @slow
    def A ( self : List[Any] ) -> List[str]:
        """Run one tokenized sentence through the decoder and compare a 3x3 slice."""
        a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
        a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            a = model(__lowerCAmelCase )[0]
        a = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape , __lowerCAmelCase )
        a = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Builds tiny MaskFormer configs, dummy pixel inputs and segmentation labels,
    and runs shape/loss checks for MaskFormerModel / MaskFormerForInstanceSegmentation.

    NOTE(review): identifiers are machine-mangled (locals ``a``, duplicated
    parameters ``__lowerCAmelCase``); code left byte-identical, comments only.
    """

    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """Record sizes used to build the dummy images, masks and labels."""
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size

    def A ( self : Union[str, Any] ) -> Dict:
        """Create pixel values, pixel mask, random mask/class labels and a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )
        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A ( self : str ) -> Any:
        """Return a tiny Swin-backbone + DETR-decoder MaskFormerConfig."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def A ( self : Union[str, Any] ) -> Any:
        """Return (config, inputs_dict) for the common tests."""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """Verify the three hidden-state collections have the expected lengths."""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue's second argument is a message, so these only
        # check truthiness of len(...) — presumably assertEqual was intended.
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """Forward MaskFormerModel and check the transformer-decoder output shape."""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """Forward the instance-segmentation head; check logits shapes and loss."""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()

        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )
            comm_check_on_output(__lowerCAmelCase )
            a = model(
                pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
            comm_check_on_output(__lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common model/pipeline test-suite for MaskFormer; generation-style tests that
    do not apply to a segmentation model are skipped.

    NOTE(review): the class attributes below all share the mangled name
    ``_UpperCAmelCase`` (only the last survives), methods all share the name ``A``,
    and locals are mangled to ``a``. Code left byte-identical; comments only.
    """

    _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : List[str] ) -> List[Any]:
        """Set up the MaskFormer model tester and a text-free ConfigTester."""
        a = MaskFormerModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def A ( self : Any ) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def A ( self : Optional[Any] ) -> Optional[int]:
        """Forward-shape check without hidden-state output."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : int ) -> int:
        """Instance-segmentation head shape/loss checks."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def A ( self : List[Any] ) -> Optional[Any]:
        """Skipped; see decorator reason."""
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def A ( self : str ) -> Union[str, Any]:
        """Skipped; see decorator reason."""
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped; see decorator reason."""
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped; see decorator reason."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def A ( self : Optional[int] ) -> List[str]:
        """Skipped; see decorator reason."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A ( self : List[str] ) -> Any:
        """Skipped; see decorator reason."""
        pass

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """The forward signature's first argument must be pixel_values."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]
            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[Any]:
        """Smoke-test loading the pretrained swin-small COCO checkpoint."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    def A ( self : str ) -> Dict:
        """A freshly initialised model must produce a loss on random targets."""
        a = (self.model_tester.min_size,) * 2
        a = {
            "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
        }
        a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """Forward-shape check with hidden-state output enabled."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : List[str] ) -> Any:
        """Attentions must be returned when output_attentions is requested."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
            a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Training smoke test: the segmentation loss must backpropagate."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
        loss.backward()

    def A ( self : List[str] ) -> Union[str, Any]:
        """Gradients must reach every intermediate hidden state and attention."""
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = True
        a = True
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        a = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# NOTE(review): presumably the absolute tolerance for the allclose comparisons in
# the slow integration tests below — confirm. Annotation corrected: 1E-4 is a float.
A_ : float = 1E-4
def UpperCAmelCase__ ( ):
    """Load and return the standard COCO fixture image used by the slow tests.

    Bug fix: the function previously returned the undefined name ``image``
    (NameError) instead of the image it had just opened.
    """
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return a
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    """Slow MaskFormer integration tests comparing pretrained-checkpoint outputs
    against hard-coded reference slices.

    NOTE(review): locals are mangled to ``a``, so later lines reference names
    (``model``, ``inputs``, ``outputs``, ...) that are never bound, and method
    names all collapse onto ``A``. Code left byte-identical; comments only.
    """

    @cached_property
    def A ( self : int ) -> Optional[int]:
        """The swin-small COCO image processor, or None without vision deps."""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def A ( self : List[Any] ) -> Optional[Any]:
        """Backbone/pixel-decoder/transformer-decoder hidden states match references."""
        a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        a = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : str ) -> Union[str, Any]:
        """Swin-small instance-segmentation head logits match references."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : List[Any] ) -> Any:
        """ResNet-101 COCO-stuff head logits match references."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : int ) -> Any:
        """Batched inputs with segmentation maps still produce a loss."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 32
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( ProcessorMixin ):
    """Processor bundling a BridgeTower image processor and a RoBERTa tokenizer
    into a single callable that produces joint text+image model inputs.

    Bug fixes: the base class was the undefined name ``UpperCAmelCase__`` (the
    imported ``ProcessorMixin`` was intended); the three ProcessorMixin class
    attributes all shared one mangled name so only the last survived; and the
    duplicated ``__lowerCAmelCase`` parameter names were a SyntaxError.
    """

    # Class attributes consumed by ProcessorMixin to build/validate the processor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        """Store both sub-processors via ProcessorMixin."""
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``; return one merged BatchEncoding.

        Tokenizer kwargs are forwarded verbatim; the image processor contributes
        ``pixel_values`` (and ``pixel_mask``) to the same encoding.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Order-preserving union of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Unit tests for ``transformers.generation.DisjunctiveConstraint``.

    Bug fixes: method bodies referenced the undefined name ``__lowerCAmelCase``
    where a local value was intended; and all methods shared the name ``A``, so
    earlier tests were shadowed and none carried the ``test_`` prefix unittest
    needs for discovery.
    """

    def test_input_types(self):
        """token_ids must be a nested list of ints; tensors are rejected with ValueError."""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """A branch that is a strict prefix of another is rejected at construction."""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """Branches of unequal length, completion, and reset()/remaining() bookkeeping."""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 32
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# BUG FIX: the import structure was bound to a throwaway obfuscated name, the
# torch branch *overwrote* it with a bare list, and _LazyModule then received
# an undefined `_import_structure` (NameError at import time); rebuilt as the
# canonical lazy-module layout.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
|
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Return True if the number's decimal digits are exactly 1..9, each used once."""
    digits = str(UpperCAmelCase__ )
    # BUG FIX: the original applied len()/set() to the int argument itself
    # (TypeError); both checks must run on the string form computed above.
    return len(digits ) == 9 and set(digits ) == set("123456789" )
def UpperCAmelCase__ ( ):
    """Project Euler 38: largest 9-pandigital concatenated product of n and (1, 2).

    Multiplying a 4-digit base by 100002 concatenates base and 2*base; a
    3-digit base times 1002003 concatenates base, 2*base's layout for the
    3-digit case. Scans bases descending so the first hit is the maximum.
    """

    def _is_9_pandigital(num: int) -> bool:
        # BUG FIX (local helper): the original called a global
        # `is_9_pandigital` that is not defined in this file.
        digits = str(num)
        return len(digits) == 9 and set(digits) == set("123456789")

    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # BUG FIX: no `solution` exists in this file; the solver above is bound to
    # the obfuscated name `UpperCAmelCase__`.
    print(F"""{UpperCAmelCase__() = }""")
| 32
| 1
|
def UpperCAmelCase__ ( sentence: str , ngram_size: int ):
    """Return every contiguous n-gram of *sentence* (works for any sliceable sequence).

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError:
    duplicate argument); the body reads `sentence` and `ngram_size`, so
    those names are restored.
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    from doctest import testmod

    testmod()
| 32
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Slow integration tests for the Tatoeba -> Transformers Marian converter.

    BUG FIX: the skip condition referenced an undefined name (DEFAULT_REPO is
    imported at the top of this file), the cached property was obfuscated away
    from the `resolver` name the tests read, `save_dir` was passed an undefined
    name, and `dry_run` likewise (restored to True).
    """

    @cached_property
    def resolver(self):
        """Converter writing into a fresh temp directory (built once per instance)."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def A(self):
        """Smoke-test converting the Hebrew->English model."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def A(self):
        """A model-card dry run should report the expected language pair."""
        model_card, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 32
| 1
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowercase ( unittest.TestCase ):
    @slow
    def A ( self : str ) -> str:
        """Integration check: xlm-roberta-base forward pass matches reference values."""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        text = "The dog is cute and lives in the garden house"
        # BUG FIX: the obfuscated source passed undefined names to encode()
        # and model(); thread the actual locals through.
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        output = model(input_ids )["last_hidden_state"]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config URL.
A_ : Optional[int] = {
    '''SCUT-DLVCLab/lilt-roberta-en-base''': (
        '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
    ),
}
class _lowercase(PretrainedConfig):
    """Configuration for a LiLT (Language-Independent Layout Transformer) model.

    BUG FIX: every __init__ parameter shared one obfuscated name (a Python
    SyntaxError: duplicate argument) and the base class / model_type class
    attribute were obfuscated away; restored from the body's attribute
    assignments and the `PretrainedConfig` import at the top of this file.
    """

    model_type = '''lilt'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        # NOTE(review): obfuscated as `max_ad_position_embeddings`; upstream
        # LiLT names this max_2d_position_embeddings — TODO confirm.
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 32
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( args: Namespace ):
    """Factory: build the convert command from parsed CLI args.

    BUG FIX: the parameter was obfuscated away from the `args` name the body
    reads, and the original returned `ConvertCommand`, a name that does not
    exist in this file (the class below was obfuscated to `_lowercase`).
    """
    return _lowercase(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Help text raised when a conversion script's TensorFlow-backed import fails.
A_ : List[str] = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _lowercase(BaseTransformersCLICommand):
    """CLI `convert` command: turn an original (mostly TensorFlow) checkpoint
    into a Transformers PyTorch checkpoint.

    BUG FIX: the base class, the static registration method's parameter, every
    __init__ parameter (duplicate names: SyntaxError), the `self._*` attribute
    targets and the ImportError payloads were all obfuscation-damaged; restored
    from the names the body reads and the imports at the top of this file.
    """

    @staticmethod
    def A(parser: ArgumentParser):
        """Register the `convert` sub-command and its arguments on *parser*."""
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        # Dispatch to the factory defined at module top (obfuscated name).
        train_parser.set_defaults(func=UpperCAmelCase__ )

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(f"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def A(self):
        """Run the conversion, dispatching on --model_type.

        Raises:
            ImportError: when the model-specific script needs TensorFlow and it
                is not installed (A_ is the help message defined above).
            ValueError: for an unknown --model_type.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(A_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            # A "ckpt" path is a raw checkpoint; anything else is a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint , self._config , self._pytorch_dump_output , tf_dataset_file )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(A_ )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 32
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a PyTorch `save_pretrained` dump.

    BUG FIX: the three parameters shared one obfuscated name (a SyntaxError:
    duplicate argument); names restored from the body's f-strings and the
    load_tf_weights call order.
    """
    config = TaConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to throwaway obfuscated
    # names while later lines read `parser`/`args`, and the final call targeted
    # an undefined `convert_tf_checkpoint_to_pytorch` (the converter above is
    # bound to the obfuscated name `UpperCAmelCase__`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    UpperCAmelCase__(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cuDNN ops deterministic so hard-coded audio slices compare reliably.
enable_full_determinism()
class _lowercase ( UpperCAmelCase__, unittest.TestCase ):
    """Fast (tiny-model, CPU) tests for DanceDiffusionPipeline.

    NOTE(review): the base class `UpperCAmelCase__`, several
    `__lowerCAmelCase` argument values and the duplicated `__lowerCAmelCase`
    parameter names below are obfuscation damage (undefined names /
    SyntaxError) — TODO restore the original identifiers.
    """

    # Pipeline class under test plus parameter sets consumed by the shared mixin.
    _UpperCAmelCase = DanceDiffusionPipeline
    _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
        '''callback''',
        '''latents''',
        '''callback_steps''',
        '''output_type''',
        '''num_images_per_prompt''',
    }
    _UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : Union[str, Any] ) -> int:
        """Build minimal pipeline components: a tiny 1-D UNet and an IPNDM scheduler."""
        torch.manual_seed(0 )
        # NOTE(review): flip_sin_to_cos / use_timestep_embedding values were lost
        # in obfuscation (`__lowerCAmelCase` is undefined here) — TODO restore.
        a = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowerCAmelCase , use_timestep_embedding=__lowerCAmelCase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
        a = IPNDMScheduler()
        a = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def A ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=0 ) -> Tuple:
        """Deterministic call kwargs for the pipeline (device + seed)."""
        if str(__lowerCAmelCase ).startswith("mps" ):
            # mps does not support device-bound generators.
            a = torch.manual_seed(__lowerCAmelCase )
        else:
            a = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        a = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def A ( self : Tuple ) -> List[Any]:
        """End-to-end smoke test: 4 steps on CPU, compare a fixed audio slice."""
        a = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a = self.get_dummy_components()
        a = DanceDiffusionPipeline(**__lowerCAmelCase )
        a = pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        a = self.get_dummy_inputs(__lowerCAmelCase )
        a = pipe(**__lowerCAmelCase )
        a = output.audios
        a = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        a = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def A ( self : Dict ) -> List[str]:
        """Delegate to the mixin's save/load round-trip check (mps unsupported)."""
        return super().test_save_load_local()

    @skip_mps
    def A ( self : int ) -> List[str]:
        """Dict and tuple outputs must agree within tolerance."""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Optional components must survive a save/load round trip."""
        return super().test_save_load_optional_components()

    @skip_mps
    def A ( self : List[Any] ) -> List[Any]:
        """Attention slicing must not change the forward pass."""
        return super().test_attention_slicing_forward_pass()

    def A ( self : Tuple ) -> Union[str, Any]:
        """Batched single-item inference must match unbatched output."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow GPU integration tests against the harmonai/maestro-150k checkpoint.

    NOTE(review): the `__lowerCAmelCase` argument values below are obfuscation
    damage (undefined names) — TODO restore the original device/None values.
    """

    def A ( self : Any ) -> Any:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self : Tuple ) -> List[Any]:
        """fp32 generation matches the recorded audio slice."""
        a = torch_device
        a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
        a = pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        a = torch.manual_seed(0 )
        a = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        a = output.audios
        a = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        a = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def A ( self : Optional[Any] ) -> Any:
        """fp16 generation matches the recorded audio slice."""
        a = torch_device
        a = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
        a = pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        a = torch.manual_seed(0 )
        a = pipe(generator=__lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        a = output.audios
        a = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        a = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 32
|
def UpperCAmelCase__ ( a: int , b: int ):
    """Return the bitwise AND of two non-negative ints as a binary string.

    The result keeps leading zeros up to the width of the longer operand,
    e.g. (25, 32) -> "0b000000".

    Raises:
        ValueError: if either operand is negative.

    BUG FIX: both parameters shared one obfuscated name (a SyntaxError:
    duplicate argument) while the body read `a` and `b`; names restored.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 32
| 1
|
from __future__ import annotations
def UpperCAmelCase__ ( nums: list[int] ):
    """Maximum sum of non-adjacent elements ("house robber"), O(n) time, O(1) space.

    Returns 0 for an empty list. Note: a list of all-negative numbers also
    yields 0, since skipping every element is allowed.

    BUG FIX: the parameter and the two accumulator names were obfuscated away
    while the body read `nums`, `max_including` and `max_excluding`; restored.
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either rob this house (previous excluded + num) or skip it
        # (best of the two previous states).
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 32
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the __main__ demo below.
# NOTE(review): both tuples are rebound to the same obfuscated name `A_`, so the
# first is unreachable — the originals were test_data_odd / test_data_even.
A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1)
A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowercase :
_UpperCAmelCase = 42
_UpperCAmelCase = 42
class _lowercase :
    """Singly linked list that keeps its elements sorted ascending.

    NOTE(review): __init__ calls `Node(...)`, which is not defined under that
    name in this file (the dataclass above was obfuscated to `_lowercase` and
    is shadowed by this class), `reverse=__lowerCAmelCase` should presumably
    be `reverse=True`, and the head assignments lost their `self.` targets —
    TODO restore the original identifiers before use.
    """

    def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None:
        """Build the list by inserting values at the head in descending order."""
        a = None
        for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ):
            a = Node(__lowerCAmelCase , self.head )

    def __iter__( self : Union[str, Any] ) -> Iterator[int]:
        """Yield the stored values from head to tail."""
        a = self.head
        while node:
            yield node.data
            a = node.next_node

    def __len__( self : Tuple ) -> int:
        """Number of nodes, counted by iterating the list."""
        return sum(1 for _ in self )

    def __str__( self : Union[str, Any] ) -> str:
        """Human-readable form, e.g. "1 -> 2 -> 3"."""
        return " -> ".join([str(__lowerCAmelCase ) for node in self] )
def UpperCAmelCase__ ( sll_one: SortedLinkedList , sll_two: SortedLinkedList ):
    """Merge two sorted linked lists into a new sorted list.

    BUG FIX: the two parameters shared one obfuscated name (a SyntaxError:
    duplicate argument); restored to distinct names.
    NOTE(review): `SortedLinkedList` is not defined under that name in this
    file (the class above is obfuscated to `_lowercase`) — TODO confirm the
    intended constructor.
    """
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `SortedLinkedList`, `SSL`, `merge_lists`, `test_data_odd`
    # and `test_data_even` are all undefined here — the obfuscation collapsed
    # them to `_lowercase` / `A_` / `UpperCAmelCase__` — TODO restore.
    A_ : Optional[Any] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 32
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCAmelCase__ ( state ):
    """Return a 1-D float tensor of num_processes consecutive values, offset so
    each process holds a distinct, contiguous range (process 0: 1..n, etc.).

    BUG FIX: the parameter was obfuscated away from the `state` name the body
    reads (NameError); restored.
    """
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCAmelCase__ ( state ):
    """Gathering each process's tensor must yield 1..num_processes**2 in order.

    BUG FIX: parameter restored to the `state` name the body reads.
    NOTE(review): `create_tensor` is obfuscated to `UpperCAmelCase__` above —
    TODO restore the helper's name.
    """
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCAmelCase__ ( state ):
    """gather_object over one-element lists must return each process index once.

    BUG FIX: parameter restored to the `state` name the body reads; the
    len()/assert operands referenced undefined obfuscated names.
    """
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, f"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), f"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def UpperCAmelCase__ ( state ):
    """Broadcast must replicate the main process's tensor (values 1..n) everywhere.

    BUG FIX: parameter restored to the `state` name the body reads.
    NOTE(review): `create_tensor` is obfuscated to `UpperCAmelCase__` above.
    """
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCAmelCase__ ( state ):
    """Non-main processes hold a shorter tensor; padding must zero-extend it to
    the longest length across processes.

    BUG FIX: parameter restored to the `state` name the body reads; the tensor
    assignments had lost their common target name.
    """
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCAmelCase__ ( state ):
    """reduce(..., "sum") over the two per-process tensors [1,2] and [3,4]
    must give [4, 6]; only meaningful with exactly two processes.

    BUG FIX: parameter restored to the `state` name the body reads.
    NOTE(review): `create_tensor` is obfuscated to `UpperCAmelCase__` above.
    """
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "sum" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f"""{reduced_tensor} != {truth_tensor}"""
def UpperCAmelCase__ ( state ):
    """reduce(..., "mean") over the two per-process tensors [1,2] and [3,4]
    must give [2, 3]; only meaningful with exactly two processes.

    BUG FIX: parameter restored to the `state` name the body reads.
    NOTE(review): `create_tensor` is obfuscated to `UpperCAmelCase__` above.
    """
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "mean" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f"""{reduced_tensor} != {truth_tensor}"""
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
    """Spawner entry point (torch.distributed / XLA); the index argument is unused.

    NOTE(review): `main` is not defined under that name in this file — every
    function above was obfuscated to `UpperCAmelCase__` — TODO restore.
    """
    main()
def UpperCAmelCase__ ( ):
    """Drive all distributed-op smoke tests on the current process.

    BUG FIX: each helper below was being passed an undefined obfuscated name
    instead of the PartialState it expects; pass `state`.
    NOTE(review): the `test_*` helper names are unresolved in this file (all
    obfuscated to `UpperCAmelCase__` above) — TODO restore them.
    """
    state = PartialState()
    state.print(f"""State: {state}""" )
    state.print("testing gather" )
    test_gather(state )
    state.print("testing gather_object" )
    test_gather_object(state )
    state.print("testing broadcast" )
    test_broadcast(state )
    state.print("testing pad_across_processes" )
    test_pad_across_processes(state )
    state.print("testing reduce_sum" )
    test_reduce_sum(state )
    state.print("testing reduce_mean" )
    test_reduce_mean(state )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file (the
    # driver above is obfuscated to `UpperCAmelCase__`) — TODO restore.
    main()
| 32
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # UnCLIP needs torch + a recent transformers; otherwise fall back to dummy
    # placeholder classes that raise a helpful error when instantiated.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map from checkpoint name to its hosted config URL.
A_ : List[Any] = {
    '''facebook/maskformer-swin-base-ade''': (
        '''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A_ : Optional[int] = logging.get_logger(__name__)
class _lowercase(PretrainedConfig):
    """Configuration for MaskFormer: a vision backbone (ResNet/Swin) config
    combined with a DETR-style transformer decoder config.

    BUG FIX: the four class attributes were all rebound to `_UpperCAmelCase`
    (so `self.backbones_supported` / `self.decoders_supported` read below were
    unresolved), every __init__ parameter shared one obfuscated name (a
    SyntaxError), and several assignment targets were lost; names restored
    from the attribute reads/assignments in the body.
    """

    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.0_2,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 2_0.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {",".join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {",".join(self.decoders_supported )}""" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror the decoder's transformer dimensions on the top-level config.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )

    @classmethod
    def A(cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        """Alternate constructor from already-instantiated backbone/decoder configs."""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )

    def A(self ) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested configs."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config URL.
A_ : str = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _lowercase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model (usable as a backbone).

    BUG FIX: the original class statement listed the same obfuscated base name
    twice (TypeError: duplicate base class) — the intended bases,
    BackboneConfigMixin and PretrainedConfig, are both imported at the top of
    this file. Every __init__ parameter also shared one obfuscated name (a
    SyntaxError); names restored from the body's attribute assignments.
    """

    model_type = '''focalnet'''

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1E-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        # NOTE: the mutable list defaults are kept to match upstream behavior.
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        # Backbone API: validated/aligned feature selection (BackboneConfigMixin
        # exposes these via out_features / out_indices properties).
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 32
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# BUG FIX: the import structure was bound to a throwaway obfuscated name, the
# torch/TF branches *overwrote* it with bare lists, and _LazyModule then
# received an undefined `_import_structure` (NameError at import time);
# rebuilt as the canonical lazy-module layout.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
|
def UpperCAmelCase__ ( head ):
    """Check whether a singly linked list is a palindrome.

    O(n) time / O(1) extra space: locate the middle with fast/slow pointers,
    reverse the second half in place, then compare the halves node by node.

    BUG FIX: the parameter and all assignment targets were collapsed to one
    obfuscated name while the body read `head`, `fast`, `slow`, `second`,
    `node` and `nxt`; names restored from those reads.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def UpperCAmelCase__ ( head ):
    """Check whether a singly linked list is a palindrome using an explicit stack.

    O(n) time / O(n) space: push the second half's values, then pop while
    walking from the head.

    BUG FIX: the parameter and assignment targets were collapsed to one
    obfuscated name while the body read `head`, `fast`, `slow`, `cur` and
    `stack`; names restored from those reads.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
'''simple docstring'''
if not head or not head.next:
return True
a = {}
a = 0
while head:
if head.val in d:
d[head.val].append(UpperCAmelCase__ )
else:
a = [pos]
a = head.next
pos += 1
a = pos - 1
a = 0
for v in d.values():
if len(UpperCAmelCase__ ) % 2 != 0:
middle += 1
else:
a = 0
for i in range(0 , len(UpperCAmelCase__ ) ):
if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 32
| 1
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 32
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
    """Builds a tiny random MobileBERT config plus matching inputs and runs
    shape assertions for the base model and every task head.

    NOTE(review): restored from an automated rewrite that had collapsed every
    method name to ``A``, duplicated all parameter names (a SyntaxError) and
    redirected every ``self.*`` assignment into a throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Draw random ids/masks/labels and pair them with a fresh config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny non-decoder MobileBERT config so the tests stay fast."""
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # The forward pass must work with or without mask / token type ids.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        # Next-sentence prediction is a binary decision.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each example once per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Name the test-case class below uses when instantiating this helper.
MobileBertModelTester = _lowercase
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Standard model-level tests for MobileBERT, driven by the common mixins.

    NOTE(review): restored method and class-attribute names that an automated
    rewrite had collapsed (all methods were ``A``, all class attributes
    ``_UpperCAmelCase`` — the last binding silently clobbered the others, and
    ``_prepare_for_class`` had duplicated parameter names, a SyntaxError).
    The two mixin base classes still read ``UpperCAmelCase__`` — confirm they
    resolve to ModelTesterMixin / PipelineTesterMixin.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy label tensors for models whose loss needs them."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ):
    """Build a ``torch.long`` tensor of token ids on the test device.

    Bug fix: the rewritten version passed the token list itself as the
    ``device`` argument; it must be the module's ``torch_device``.
    """
    return torch.tensor(
        UpperCAmelCase__ , dtype=torch.long , device=torch_device , )


# Name used by the integration-test call sites below.
_long_tensor = UpperCAmelCase__
A_ : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
    """Slow integration test against the real google/mobilebert-uncased checkpoint."""

    @slow
    def test_inference_no_head(self):
        """Check the base model's output shape and a 3x3 slice of activations.

        Bug fix: the method was named ``A`` by an automated rewrite, so
        unittest never discovered or ran it; restored a ``test_``-prefixed
        name and the ``torch_device`` placement arguments.
        """
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
                    [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
                    [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 32
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
_UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase = field(
default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
_UpperCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
_UpperCAmelCase = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
    """Fine-tune a token-classification model (NER/POS/...) with the HF Trainer.

    Parses arguments, loads the task/labels/model/tokenizer, optionally
    trains, evaluates and predicts, and returns the eval-metrics dict.

    Bug fix: an automated rewrite collapsed every local assignment target to
    ``a`` (leaving ``model_args``, ``trainer``, ... unbound) and duplicated
    the parameter names of the nested ``align_predictions`` (a SyntaxError);
    the original identifiers are restored.
    """
    # Parse command-line (or single-JSON-file) args into the three dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome.")

    # Resolve the concrete TokenClassificationTask subclass named by --task_type.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # restored from the rewritten ``fpaa``
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        """Turn logits + label ids into per-sentence label-string lists, skipping ignored positions."""
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            # NOTE(review): ``fa_score`` mirrors this file's (rewritten) import;
            # seqeval's real name is ``f1_score`` — fix the import alongside.
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


# Canonical alias: the __main__ guard and the xla_spawn wrapper call ``main()``.
main = UpperCAmelCase__
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    """xla_spawn (TPU) entry point: each spawned process receives its index
    and runs the full training script."""
    # NOTE(review): ``main`` is not bound under that name in the visible
    # module scope — confirm the training entry point alias before use.
    main()
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in the visible
    # module scope — confirm the entry point alias before shipping.
    main()
| 32
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( UpperCAmelCase__ ):
    """Style checks over every dataset script under ./datasets: every
    ``open(...)`` must pass an explicit encoding and no ``print`` calls may
    remain outside comments/strings.

    NOTE(review): an automated rewrite renamed all four methods to ``A`` (the
    later defs shadow the earlier ones) while the bodies still call
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements`` —
    restore the original method names before these checks can run.
    """

    def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]:
        """Return a regex match if the file opens a file without an encoding
        (binary and write-mode opens are excluded), else None."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            a = input_file.read()
            a = regexp.search(__lowerCAmelCase )
            return match

    def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict:
        """Return the first real ``print(`` call in the file, ignoring prints
        inside comments, string literals and docstrings; None if clean."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            a = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            a = regexp.finditer(__lowerCAmelCase )
            a = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    def A ( self : List[str] ) -> List[Any]:
        """Fail if any dataset script opens a file without utf-8 encoding."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )

    def A ( self : Tuple ) -> Union[str, Any]:
        """Fail if any dataset script contains a bare print statement."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 32
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowercase ( TaskTemplate ):
    """Task template describing a language-modeling dataset: a single string
    "text" column and no labels.

    Bug fix: an automated rewrite collapsed every class attribute to
    ``_UpperCAmelCase`` (so only the last survived), mangled the frozen flag
    and base-class name, and renamed the ``column_mapping`` property (which
    also referenced the then-missing ``text_column``); all restored.
    """

    # Registered task name; serialized even though it equals the default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's configured text column onto the canonical "text" key."""
        return {self.text_column: "text"}
| 32
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map each submodule to the public names it exposes; consumed by _LazyModule
# so the heavy imports only happen on first attribute access.
#
# Bug fix: the rewritten version bound the dict to ``A_``, then clobbered it
# with the modeling-name list, and finally passed an undefined
# ``_import_structure`` to _LazyModule (a NameError at import time).
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch; register it only when torch is installed.
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # At runtime, bind the lazy proxy module (original final ``A_`` binding kept).
    A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ): # picklable for multiprocessing
'''simple docstring'''
return x.sum()
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@dataclass
class _lowercase :
    # NOTE(review): an automated rewrite flattened both fields to the same
    # name and to the annotation-sentinel value 42; with no annotated fields
    # the @dataclass decorator generates an empty __init__. Recover the
    # intended field names/types before relying on this class.
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42
class _lowercase ( UpperCAmelCase__ ):
    # NOTE(review): an automated rewrite collapsed every fixture variable to a
    # throwaway local ``a`` and every call argument to ``__lowerCAmelCase``,
    # so the assertions below no longer reference the structures they build;
    # the base-class name also resolves to a module-level function here.
    # Restore the original identifiers before relying on these tests.
    def A ( self : str ) -> Union[str, Any]:
        """Exercise map_nested over scalars, lists, dicts and nested dicts —
        sequentially, with num_proc, and with/without numpy mapping."""
        a = {}
        a = []
        a = 1
        a = [1, 2]
        a = {"a": 1, "b": 2}
        a = {"a": [1, 2], "b": [3, 4]}
        a = {"a": {"1": 1}, "b": 2}
        a = {"a": 1, "b": 2, "c": 3, "d": 4}
        a = {}
        a = []
        a = 2
        a = [2, 3]
        a = {"a": 2, "b": 3}
        a = {"a": [2, 3], "b": [4, 5]}
        a = {"a": {"1": 2}, "b": 3}
        a = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
        # Repeat the same mappings with multiprocessing enabled.
        a = 2
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        # Numpy handling: map_numpy toggles whether arrays are mapped element-wise.
        a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        a = {"a": 2, "b": 0, "c": 2}
        a = {
            "a": np.eye(2 ).astype(__lowerCAmelCase ),
            "b": np.zeros(3 ).astype(__lowerCAmelCase ),
            "c": np.ones(2 ).astype(__lowerCAmelCase ),
        }
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , map_numpy=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowerCAmelCase , __lowerCAmelCase , map_numpy=__lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(__lowerCAmelCase , __lowerCAmelCase , map_numpy=__lowerCAmelCase , num_proc=__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__lowerCAmelCase , __lowerCAmelCase , map_numpy=__lowerCAmelCase , num_proc=__lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(__lowerCAmelCase ):  # can't pickle a local lambda
            map_nested(lambda __lowerCAmelCase : x + 1 , __lowerCAmelCase , num_proc=__lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """zip_dict must iterate shared keys and zip the per-dict values."""
        a = {"a": 1, "b": 2}
        a = {"a": 3, "b": 4}
        a = {"a": 5, "b": 6}
        a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ) , __lowerCAmelCase )

    def A ( self : Dict ) -> Dict:
        """temporary_assignment must patch an attribute for the duration of
        the context and restore the original value afterwards."""
        class _lowercase :
            _UpperCAmelCase = '''bar'''
        a = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(__lowerCAmelCase , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def UpperCAmelCase__(iterable_length, num_proc, expected_num_proc):
    """``map_nested`` must only spawn a pool when the workload is large enough.

    Parameter names must match the ``parametrize`` argnames above; the original
    block declared three identically-named parameters (a SyntaxError) and the
    mapped lambda referenced an undefined free variable.
    """
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            # Small workloads stay single-process.
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            # Large workloads go through the pool, capped at the iterable length.
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _lowercase(UpperCAmelCase__):
    """``temp_seed`` must make framework RNG reproducible, and only while active.

    NOTE(review): the base class identifier was destroyed by obfuscation
    (``UpperCAmelCase__``) — kept as-is; likely ``unittest.TestCase`` upstream.
    The three test methods were all named ``A`` (so only the last survived);
    renamed to unique ``test_*`` names so they no longer shadow each other.
    """

    @require_tf
    def test_temp_seed_tensorflow(self):
        """TF RNG is identical under the same seed and different without it."""
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        # Unseeded run: must diverge from the seeded ones.
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        """PyTorch RNG is identical under the same seed and different without it."""
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        """NumPy RNG is identical under the same seed and different without it."""

        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def UpperCAmelCase__(input_data):
    """``NestedDataStructure.data`` must return the wrapped object unchanged.

    The parameter must be named ``input_data`` to match the ``parametrize``
    argname; the original body also referenced undefined locals.
    """
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def UpperCAmelCase__(data, expected_output):
    """``NestedDataStructure.flatten`` must yield all leaf values in order.

    Parameter names restored to match the ``parametrize`` argnames; the
    original declared two identically-named parameters (a SyntaxError).
    """
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def UpperCAmelCase__():
    """``asdict`` must recurse through nested containers of dataclasses and
    reject non-dataclass items.

    The original body passed the enclosing function to ``asdict`` and compared
    against an undefined name; restored distinct locals.
    """
    input_obj = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_obj) == expected_output

    # Dataclasses nested inside dicts and lists are converted recursively.
    nested = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested) == expected_output

    # NOTE(review): exception type reconstructed — verify against datasets.asdict.
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
'''simple docstring'''
return text.split()
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ):
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def UpperCAmelCase__():
    """``iflatmap_unordered`` must flatten results across workers and yield each
    item as soon as it is produced.

    The original body passed the enclosing function itself where the pool was
    expected and appended an undefined name; restored ``pool``/``out``/``content``.
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase(SchedulerCommonTest):
    """Tests for ``UniPCMultistepScheduler``: save/load round-trips, config
    sweeps, full denoising loops, scheduler switching and fp16 support.

    NOTE(review): identifiers reconstructed from intact usage sites — the
    obfuscated chunk had both class attributes named identically (the second
    shadowing the first), every method named ``A`` (shadowing all but the
    last), and an undefined base class despite ``SchedulerCommonTest`` being
    imported and otherwise unused.
    """

    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Default UniPC config; keyword overrides are applied on top."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Stepping must be identical after a save_config/from_pretrained round-trip."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            # Step through enough timesteps to exercise the multistep history.
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Single step must be identical after a save_config/from_pretrained round-trip."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop and return the final sample.

        Fix: the original rebuilt the scheduler unconditionally after the
        ``if scheduler is None`` guard, silently discarding a caller-supplied
        scheduler (used by the switch test below).
        """
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """Step output must preserve the sample shape at any timestep."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        """Config round-trips through sibling schedulers must not change results."""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        # Round-trip the config through every compatible multistep scheduler.
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        """A half-precision sample must stay fp16 through the whole loop."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # NOTE(review): dtype reconstructed as float16 (sample was .half() above).
        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 32
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase(unittest.TestCase):
    """Round-trip and feature tests for ``BarkProcessor``.

    NOTE(review): the obfuscated chunk named every method ``A`` (so unittest
    never ran them and ``self.get_tokenizer`` was undefined) and dropped the
    ``self.`` prefixes in setUp; names reconstructed from the intact usage
    sites (``self.checkpoint``, ``self.tmpdirname``, ``self.get_tokenizer``, ...).
    """

    def setUp(self):
        # Fixtures shared by every test; ``tmpdirname`` is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Tokenizer matching the checkpoint, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the three boolean flags were destroyed by obfuscation;
        # reconstructed as the usual Bark settings — verify against upstream.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 32
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds small ConvNextV2 configs/inputs and runs shape checks.

    Renamed from the obfuscated placeholder to match the existing
    ``ConvNextVaModelTester(self)`` call site in the test class; the original
    ``__init__`` declared every parameter with the same name (a SyntaxError)
    and never assigned ``self.*`` attributes.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # Mutable defaults are read-only here (HF test convention), so safe.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _lowercase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvNextV2 model tests (config, training, signatures, hidden states).

    NOTE(review): the obfuscated chunk left both mixin bases and every class
    attribute/method name as placeholders; reconstructed from the imports
    (``ModelTesterMixin``, ``PipelineTesterMixin``, the two mapping-name
    constants) and the mixins' expected attribute names.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op hook for the config test above.
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base models and backbones have no loss head to train against.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # stem output + one per stage
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test.

    Renamed from the obfuscated placeholder to match the existing
    ``prepare_img()`` call site in the integration class below.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _lowercase(unittest.TestCase):
    """Slow integration test: ConvNextV2 classification head on a real image."""

    @cached_property
    def default_image_processor(self):
        # Named to match the existing ``self.default_image_processor`` usage.
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 32
| 1
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowercase(unittest.TestCase):
    """``get_activation`` must map names to the right ``nn`` modules with the
    expected values at a few probe points.

    The obfuscated chunk named all four tests ``A`` (only the last survived
    and none were discovered by unittest) and referenced an undefined local;
    renamed to unique ``test_*`` methods.
    NOTE(review): probe dtype reconstructed as float32 — verify upstream.
    """

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram-based contrast stretch for a grayscale image.

    Renamed from the obfuscated placeholder to match the existing
    ``ConstantStretch()`` call in the ``__main__`` guard; method names
    (``stretch``/``plot_histogram``/``show_image``) and ``self.*`` attributes
    reconstructed from their intact usage sites.
    """

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []  # remapped gray level per original level
        self.rem = 0
        self.L = 256  # number of gray levels
        self.sk = 0  # running cumulative probability
        self.k = 0  # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Read *input_image* as grayscale, remap its gray levels via the
        cumulative histogram, and write ``output_data/output.jpg``."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        # plt.hist returns (counts, bin_edges, patches); only counts are used.
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        # Loop-invariant image dimensions (hoisted out of the histogram loop).
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the (stretched) image histogram."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display the stretched and original images for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): several apparent breakages here — ``os.path.basename``
    # should almost certainly be ``os.path.dirname`` (resolve the sample image
    # relative to this script), ``ConstantStretch`` is not defined under that
    # name (the class above is ``_lowercase``), and ``stretcher``/``file_path``
    # are never bound because both assignments target ``A_``.  Confirm against
    # the original source before running.
    A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    A_ : int = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 32
| 1
|
import numpy as np
import datasets
A_ : Tuple = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
A_ : List[Any] = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
A_ : Optional[Any] = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """Mahalanobis distance of each row of ``X`` from a reference distribution.

    Fixes: both methods were named ``A`` (the second silently clobbered the
    first, and ``datasets.Metric`` dispatches to ``_info``/``_compute``), the
    compute method repeated a parameter name (a SyntaxError), and its locals
    were discarded into a throwaway ``a`` while later lines read the original
    names.
    """

    def _info(self) -> "datasets.MetricInfo":
        """Declare the metric metadata and input schema (``X``: sequences of floats)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return ``{"mahalanobis": dist}`` with one squared distance per row of ``X``.

        Raises:
            ValueError: if either input is not 2D, or the reference
                distribution has fewer than two rows (covariance undefined).
        """
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction.
        # NOTE(review): the mean is taken over the *flattened* reference (a
        # scalar), matching the historical behaviour; the textbook definition
        # uses a per-feature mean (axis=0) — confirm before changing.
        X_minus_mu = X - np.mean(reference_distribution)
        covariance = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(covariance)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(covariance)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 32
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( UpperCAmelCase__ ):
    """Unconditional image generation by integrating a score-based SDE (VE) in reverse.

    NOTE(review): obfuscation damage — inside ``__call__`` every local is bound
    to the throwaway name ``a`` while later lines read the presumed original
    names (``shape``, ``sample``, ``model``, ``output`` ...).  Comments below
    describe the apparent intent only; confirm against the original source.
    """

    # Type placeholders for the registered components (UNet model, SDE-VE scheduler).
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str:
        """Register the UNet and the SDE-VE scheduler on the pipeline."""
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )

    @torch.no_grad()
    def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample a batch of images: start from sigma-scaled noise, then for each
        timestep run ``correct_steps`` Langevin correction steps followed by one
        predictor step; finally clamp to [0, 1] and optionally convert to PIL."""
        a = self.unet.config.sample_size                  # square output resolution
        a = (batch_size, 3, img_size, img_size)           # NCHW sample shape
        a = self.unet
        # Start from pure noise scaled by the scheduler's initial sigma.
        a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
        a = sample.to(self.device )
        self.scheduler.set_timesteps(__lowerCAmelCase )
        self.scheduler.set_sigmas(__lowerCAmelCase )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            # Per-sample sigma for the current timestep.
            a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
                a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
            # prediction step
            a = model(__lowerCAmelCase , __lowerCAmelCase ).sample
            a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
            a , a = output.prev_sample, output.prev_sample_mean
        # Use the noise-free mean of the final step, clamped to valid pixel range.
        a = sample_mean.clamp(0 , 1 )
        a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()   # NCHW -> NHWC numpy
        if output_type == "pil":
            a = self.numpy_to_pil(__lowerCAmelCase )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=__lowerCAmelCase )
| 32
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A_ : Optional[Any] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """Whisper-style log-mel spectrogram feature extractor.

    NOTE(review): obfuscation damage — ``__init__`` and ``__call__`` bind
    results to the throwaway name ``a`` while later lines read the presumed
    original names (``self.n_fft``, ``padded_inputs`` ...), and several
    signatures repeat a parameter name (a SyntaxError).  ``np.floataa`` /
    ``np.intaa`` look like digit-mangled float32/int32.  Comments describe
    apparent intent only.
    """

    # Names of the model-input features produced by this extractor.
    _UpperCAmelCase = ['''input_features''']

    def __init__( self : int , __lowerCAmelCase : Dict=80 , __lowerCAmelCase : int=1_6000 , __lowerCAmelCase : Optional[Any]=160 , __lowerCAmelCase : Union[str, Any]=30 , __lowerCAmelCase : Union[str, Any]=400 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Union[str, Any]=False , **__lowerCAmelCase : Union[str, Any] , ) -> str:
        """Configure STFT/mel parameters (apparently: 80 mel bins, 16 kHz, hop 160,
        30 s chunks, n_fft 400) and precompute the mel filter bank."""
        super().__init__(
            feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
        a = n_fft
        a = hop_length
        a = chunk_length
        a = chunk_length * sampling_rate          # samples per 30 s chunk
        a = self.n_samples // hop_length          # frames per chunk
        a = sampling_rate
        # Slaney-normalized mel filter bank up to 8 kHz.
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , )

    def A ( self : Optional[Any] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """Compute the clamped, rescaled log10 mel spectrogram of one waveform."""
        a = spectrogram(
            __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
        a = log_spec[:, :-1]                      # drop the trailing frame
        # Clamp to at most 8 dB below the maximum, then rescale to roughly [-1, 1].
        a = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0 )
        a = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def A ( __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : float = 0.0 ) -> List[np.ndarray]:
        """Normalize each vector to zero mean / unit variance, computing the
        statistics over its unpadded prefix only when a mask is supplied."""
        if attention_mask is not None:
            a = np.array(__lowerCAmelCase , np.intaa )
            a = []
            for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ):
                # Statistics over the real (unpadded) prefix only; epsilon avoids /0.
                a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    a = padding_value
                normed_input_values.append(__lowerCAmelCase )
        else:
            a = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values

    def __call__( self : str , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[str] = "max_length" , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , **__lowerCAmelCase : str , ) -> BatchFeature:
        """Featurize raw mono speech: validate the sampling rate, batch the
        input, pad to the 30 s window, optionally normalize, extract log-mel
        features, and optionally convert the batch to tensors."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # A 2D ndarray means an already-batched set of waveforms.
        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]
        a = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        a = self.pad(
            __lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            a = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
            a = np.stack(padded_inputs["input_features"] , axis=0 )
        # make sure list is in array format
        a = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
        a = [self._np_extract_fbank_features(__lowerCAmelCase ) for waveform in input_features[0]]
        if isinstance(input_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
        else:
            a = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            a = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            a = padded_inputs.convert_to_tensors(__lowerCAmelCase )
        return padded_inputs

    def A ( self : str ) -> Dict[str, Any]:
        """Serialize the extractor config to a dict, dropping the bulky precomputed mel filter bank."""
        a = copy.deepcopy(self.__dict__ )
        a = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 32
|
# NOTE(review): obfuscation damage — all three constants below are bound to the
# same name ``A_``, so only the weekday-name dict survives.  They were
# presumably DOOMSDAY_LEAP, DOOMSDAY_NOT_LEAP and WEEK_DAY_NAMES, which the
# week-day function below reads.  Confirm against the original source.
# Per-month day-of-month that falls on the year's "doomsday" (leap years).
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Per-month doomsday dates for common (non-leap) years.
A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Week-day index -> name, with 0 = Sunday (the doomsday algorithm's convention here).
A_ : Optional[int] = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}
def UpperCAmelCase__ ( year: int , month: int , day: int ):
    '''Return the week-day name of a Gregorian date via the Doomsday algorithm.

    Fixes applied:
    - The original signature repeated one parameter name three times (a
      SyntaxError) while the body read ``year``/``month``/``day``.
    - Intermediate results were discarded into a throwaway ``a`` while later
      lines read ``century_anchor``/``dooms_day``/``day_anchor``.
    - The leap-century test was inverted: ``year % 400 == 0`` selected the
      *non-leap* table, so e.g. 1900 (not leap) and 2000 (leap) were wrong.
      A century year is leap exactly when divisible by 400.
    - The lookup tables are defined locally because the module-level constants
      were all clobbered into a single name.

    >>> UpperCAmelCase__(2020, 10, 24)
    'Saturday'
    >>> UpperCAmelCase__(2000, 1, 1)
    'Saturday'
    '''
    # Day-of-month that falls on the year's "doomsday", per month (index 0 = January).
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }
    # minimal input check (same checks as the original):
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    # Anchor week-day of the century (repeats every 400 years).
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Week-day on which this year's doomsday dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Non-leap iff not divisible by 4, or a century year NOT divisible by 400.
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run the module doctests (the week-day function's embedded examples).
    import doctest

    doctest.testmod()
| 32
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)

# Map of canonical Funnel checkpoint names to their hosted config files.
# NOTE(review): this rebinding of ``A_`` clobbers the logger bound just above —
# both were presumably distinct names in the original source.
A_ : Dict = {
    '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
    '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
    '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
    '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
    '''funnel-transformer/intermediate''': (
        '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
    ),
    '''funnel-transformer/intermediate-base''': (
        '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
    ),
    '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
    '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
    '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
    '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
    """Configuration for a Funnel Transformer model.

    Fixes applied:
    - The ``__init__`` signature repeated one parameter name ~20 times (a
      SyntaxError) while the body read the real hyper-parameter names.
    - Every hyper-parameter was discarded into a local ``a`` instead of being
      stored on ``self``.
    - The two properties were both named ``A``, so the
      ``@num_hidden_layers.setter`` / ``@num_blocks.setter`` decorators raised
      NameError at class-creation time; the property names now match.
    - ``block_sizes`` no longer uses a shared mutable default list.

    NOTE(review): the two class attributes below still share one (obfuscated)
    name, so only the mapping survives — presumably ``model_type`` and
    ``attribute_map`` originally; confirm against the original source.
    """

    _UpperCAmelCase = '''funnel'''
    _UpperCAmelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=None,
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters on the instance and validate the
        block/pooling/attention settings before delegating to the base config."""
        self.vocab_size = vocab_size
        # None sentinel avoids a shared mutable default; [4, 4, 4] is the historical default.
        self.block_sizes = [4, 4, 4] if block_sizes is None else block_sizes
        self.block_repeats = [1] * len(self.block_sizes) if block_repeats is None else block_repeats
        assert len(self.block_sizes) == len(
            self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        """Total number of encoder layers: the sum of the per-block sizes."""
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        """Number of encoder blocks."""
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 32
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
    """Arguments describing which model/config/tokenizer to fine-tune.

    NOTE(review): obfuscation damage — every field is named ``_UpperCAmelCase``
    and carries no type annotation, so ``@dataclass`` will not register any of
    them (only annotated names become fields) and later assignments clobber the
    earlier ones.  The originals were presumably ``model_name_or_path``,
    ``config_name``, ``task_type``, ``tokenizer_name``, ``use_fast`` and
    ``cache_dir``; confirm against the original source.
    """
    _UpperCAmelCase = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
    """Arguments describing the data used for training and evaluation.

    NOTE(review): obfuscation damage — every field shares one unannotated name
    (see the note on the model-arguments dataclass above is *not* assumed here:
    the same clobbering applies in this class too).  The originals were
    presumably ``data_dir``, ``labels``, ``max_seq_length`` and
    ``overwrite_cache``; confirm against the original source.
    """
    _UpperCAmelCase = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    _UpperCAmelCase = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
    '''Fine-tune, evaluate and/or predict a token-classification (NER-style) model.

    Parses CLI/JSON arguments, loads the task class, tokenizer and model,
    builds train/eval/test datasets, then runs the Trainer and writes metric
    and prediction files to the output directory.

    NOTE(review): obfuscation damage throughout — results of ``a = ...`` /
    ``a , a , a = ...`` assignments are read back under their presumed original
    names (``parser``, ``model_args``, ``training_args`` ...), and the nested
    helpers repeat a parameter name, which is a SyntaxError.  Comments describe
    apparent intent only; confirm against the original source.
    '''
    # Parse arguments into the three dataclasses (from a JSON file or argv).
    a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        a , a , a = parser.parse_args_into_dataclasses()
    # Refuse to silently clobber a non-empty output directory when training.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )
    # Dynamically resolve the TokenClassificationTask subclass named by --task_type.
    a = import_module("tasks" )
    try:
        a = getattr(UpperCAmelCase__ , model_args.task_type )
        a = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    a = token_classification_task.get_labels(data_args.labels )
    a = dict(enumerate(UpperCAmelCase__ ) )
    a = len(UpperCAmelCase__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
    a = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    a = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
        # Argmax over the label axis, then drop positions labelled with the
        # CrossEntropyLoss ignore index (padding/special tokens).
        a = np.argmax(UpperCAmelCase__ , axis=2 )
        a , a = preds.shape
        a = [[] for _ in range(UpperCAmelCase__ )]
        a = [[] for _ in range(UpperCAmelCase__ )]
        for i in range(UpperCAmelCase__ ):
            for j in range(UpperCAmelCase__ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
        # seqeval accuracy / precision / recall / F1 over aligned label sequences.
        a , a = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
        }

    # Data collator
    a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    a = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    a = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        a = trainer.evaluate()
        a = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(UpperCAmelCase__ )
    # Predict
    if training_args.do_predict:
        a = TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        a , a , a = trainer.predict(UpperCAmelCase__ )
        a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
        a = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
            # Save predictions
            a = os.path.join(training_args.output_dir , "test_predictions.txt" )
            if trainer.is_world_process_zero():
                with open(UpperCAmelCase__ , "w" ) as writer:
                    with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                        token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    '''TPU multiprocessing entry point (xla_spawn passes a process index).

    NOTE(review): ``main`` is not defined in this file — the main routine above
    was renamed to ``UpperCAmelCase__`` by the obfuscation, so both calls below
    would raise NameError; confirm against the original source.
    '''
    main()


if __name__ == "__main__":
    main()
| 32
| 1
|
from math import isqrt
def UpperCAmelCase__ ( max_number: int ):
    '''Return every prime strictly below ``max_number`` (sieve of Eratosthenes).

    Fixes applied: the original parameter name shadowed the function while the
    body read an undefined ``max_number``, and the sieve array was discarded
    into a throwaway local so no composite was ever marked.  Also guards
    ``max_number < 2``, where the original would call ``isqrt`` on a negative
    number or index past the array.

    >>> UpperCAmelCase__(10)
    [2, 3, 5, 7]
    '''
    if max_number < 2:
        return []
    is_prime = [True] * max_number
    # Only need to sieve with factors up to sqrt(max_number - 1).
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Start at i*i: smaller multiples were marked by smaller factors.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def UpperCAmelCase__ ( max_number: int = 10**8 ):
    '''Count semiprimes (products of exactly two primes, p*q with p <= q)
    strictly below ``max_number`` (Project Euler 187 with the default limit).

    Fixes applied: the original parameter shadowed the function name while the
    body read an undefined ``max_number``; the prime list / two-pointer indices
    were discarded into a throwaway local; and ``calculate_prime_numbers`` is
    not defined under that name in this file, so the sieve is provided as a
    private helper here.
    '''

    def _primes_below(limit: int) -> list:
        # Sieve of Eratosthenes returning all primes strictly below ``limit``.
        if limit < 2:
            return []
        sieve = [True] * limit
        for i in range(2, isqrt(limit - 1) + 1):
            if sieve[i]:
                for j in range(i * i, limit, i):
                    sieve[j] = False
        return [i for i in range(2, limit) if sieve[i]]

    # The smaller factor p satisfies p <= max_number / 2 (since q >= 2).
    prime_numbers = _primes_below(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer scan: for each smallest factor p[left], shrink ``right`` until
    # p[left] * p[right] < max_number; every j in [left, right] then gives a
    # distinct semiprime p[left] * p[j].
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined here — the function above was
    # renamed to ``UpperCAmelCase__`` by the obfuscation, so this would raise
    # NameError; confirm against the original source before running.
    print(F"""{solution() = }""")
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
# Map of canonical RWKV checkpoint names to their hosted config files.
# NOTE(review): this rebinding of ``A_`` clobbers the logger bound just above —
# both were presumably distinct names in the original source.
A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
    """Configuration for an RWKV model.

    Fixes applied: the ``__init__`` signature repeated one parameter name a
    dozen times (a SyntaxError) while the body read the real hyper-parameter
    names, and every value was discarded into a local ``a`` instead of being
    stored on ``self``.
    """

    _UpperCAmelCase = '''rwkv'''
    _UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        """Store the hyper-parameters on the instance and delegate the shared
        token/embedding options to the base configuration class."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes default relative to hidden_size when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 32
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
A_ : Optional[Any] = logging.get_logger(__name__)

# NOTE(review): every constant below rebinds ``A_``, clobbering the logger and
# each other — presumably distinct docstring/checkpoint names originally.
# General docstring
A_ : Optional[int] = '''RegNetConfig'''

# Base docstring
A_ : Union[str, Any] = '''facebook/regnet-y-040'''
A_ : int = [1, 10_88, 7, 7]

# Image classification docstring
A_ : Any = '''facebook/regnet-y-040'''
A_ : Dict = '''tabby, tabby cat'''

A_ : Optional[Any] = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
    """RegNet basic conv block: Conv2d -> BatchNorm2d -> activation.

    Fixes applied: the ``__init__`` signature repeated one parameter name (a
    SyntaxError); submodules were discarded into a local ``a`` instead of
    being stored on ``self``; ``nn.Convad``/``nn.BatchNormad`` do not exist in
    ``torch.nn`` (digit-mangled ``Conv2d``/``BatchNorm2d``).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ) -> None:
        super().__init__()
        # "same" padding for odd kernels; bias omitted because the batch-norm
        # that follows provides its own affine shift.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # ACTaFN is the project-level name->activation table; identity when no
        # activation is requested (the table is only consulted in that case).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def A(self, hidden_state):
        """Apply convolution, batch-norm and activation in sequence."""
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state

    # nn.Module dispatches __call__ to ``forward``; alias the obfuscated method
    # name so the layer also works inside nn.Sequential / plain module calls.
    forward = A
class _lowercase ( nn.Module ):
    """RegNet stem: a 3x3 stride-2 conv embedding of the input pixels.

    NOTE(review): obfuscation damage — ``RegNetConvLayer`` is not defined under
    that name in this file (the sibling classes are all ``_lowercase``),
    ``__init__`` reads an undefined ``config`` and binds ``a`` instead of the
    attributes the forward pass reads (``self.embedder``,
    ``self.num_channels``).  Comments describe apparent intent only.
    """

    def __init__( self : Optional[Any] , __lowerCAmelCase : RegNetConfig ) -> Dict:
        """Build the stem conv from the model config (channels, embedding size, activation)."""
        super().__init__()
        a = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        a = config.num_channels

    def A ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Tuple:
        """Validate the channel count of the pixel values, then embed them."""
        a = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        a = self.embedder(__lowerCAmelCase )
        return hidden_state
class _lowercase ( nn.Module ):
    """RegNet shortcut: 1x1 strided conv + batch-norm projecting residual inputs.

    Fixes applied: the ``__init__`` signature repeated one parameter name (a
    SyntaxError); submodules were discarded into a local ``a`` instead of being
    stored on ``self``; ``nn.Convad``/``nn.BatchNormad`` do not exist in
    ``torch.nn`` (digit-mangled ``Conv2d``/``BatchNorm2d``).
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        # 1x1 projection; bias omitted because batch-norm supplies the shift.
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def A(self, hidden_state: Tensor) -> Tensor:
        """Project to ``out_channels`` and downsample spatially by ``stride``."""
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state

    # nn.Module dispatches __call__ to ``forward``; alias the obfuscated name.
    forward = A
class _lowercase ( nn.Module ):
    """Squeeze-and-excitation layer: per-channel attention from pooled features.

    Fixes applied: the ``__init__`` signature repeated one parameter name (a
    SyntaxError); submodules were discarded into a local ``a`` instead of being
    stored on ``self``; ``nn.AdaptiveAvgPoolad``/``nn.Convad`` do not exist in
    ``torch.nn`` (digit-mangled ``AdaptiveAvgPool2d``/``Conv2d``).
    """

    def __init__(self, in_channels: int, reduced_channels: int) -> None:
        super().__init__()
        # Global average pool to a 1x1 map, then a bottleneck MLP (as 1x1
        # convs) with a sigmoid gate per channel.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def A(self, hidden_state):
        """Scale every channel of the input by its learned attention weight."""
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state

    # nn.Module dispatches __call__ to ``forward``; alias the obfuscated name.
    forward = A
class _lowercase ( nn.Module ):
    """RegNet X-style residual layer: 1x1 -> grouped 3x3 -> 1x1 convs plus a shortcut.

    NOTE(review): obfuscation damage — ``RegNetShortCut``/``RegNetConvLayer``
    are not defined under those names in this file (sibling classes are all
    ``_lowercase``), the ``__init__`` signature repeats a parameter name (a
    SyntaxError), and submodules are bound to ``a`` instead of the attributes
    the forward pass reads (``self.layer``, ``self.shortcut``,
    ``self.activation``).  Comments describe apparent intent only.
    """

    def __init__( self : Optional[int] , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 ) -> Any:
        """Build the three-conv residual branch and a projection shortcut when
        the shape changes (channel count differs or stride > 1)."""
        a = in_channels != out_channels or stride != 1
        # Group width from the config determines the number of conv groups.
        a = max(1 , out_channels // config.groups_width )
        a = (
            RegNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
        )
        a = nn.Sequential(
            RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , groups=__lowerCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
        a = ACTaFN[config.hidden_act]

    def A ( self : List[str] , __lowerCAmelCase : List[Any] ) -> int:
        """Residual forward: branch output + (possibly projected) input, then activation."""
        a = hidden_state
        a = self.layer(__lowerCAmelCase )
        a = self.shortcut(__lowerCAmelCase )
        hidden_state += residual
        a = self.activation(__lowerCAmelCase )
        return hidden_state
class _lowercase ( nn.Module ):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation stage inserted."""

    def __init__( self : Optional[Any] , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 ) -> None:
        super().__init__()
        # Fixes: distinct parameter names and real attribute bindings (the
        # original bound everything to a throwaway ``a``).
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        # Same as the X layer, with squeeze-excite between the grouped conv and
        # the final (activation-free) 1x1 conv.
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        # NOTE(review): name garbled upstream (``ACT2FN``) — kept for file consistency.
        self.activation = ACTaFN[config.hidden_act]

    def A ( self : Tuple , hidden_state : Tensor ) -> Tensor:
        """Residual forward: main branch + (possibly projected) shortcut, then activation."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class _lowercase ( nn.Module ):
    """A RegNet stage: ``depth`` stacked X/Y layers; the first layer applies ``stride``."""

    def __init__( self : Any , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , ) -> None:
        super().__init__()
        # Fixes: distinct parameter names and real attribute binding.
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )

    def A ( self : Tuple , hidden_state : Tensor ) -> Tensor:
        """Run the stacked layers over ``hidden_state``."""
        hidden_state = self.layers(hidden_state)
        return hidden_state
class _lowercase ( nn.Module ):
    """RegNet encoder: a list of stages producing progressively smaller feature maps."""

    def __init__( self : Tuple , config : RegNetConfig ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        # Fix: the zip of consecutive hidden sizes was bound to a throwaway ``a``
        # and never used; pair (in, out) channel sizes with the remaining depths.
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )

    def A ( self : int , hidden_state : Tensor , output_hidden_states : bool = False , return_dict : bool = True ) -> BaseModelOutputWithNoAttention:
        """Run all stages; optionally collect the hidden state before/after each stage."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class _lowercase ( UpperCAmelCase__ ):
    """Base class wiring RegNet into the HF pretrained-model machinery."""

    # NOTE(review): all four class attributes are bound to the same garbled name
    # ``_UpperCAmelCase``, so only the last assignment survives; upstream these
    # look like config_class / base_model_prefix / main_input_name /
    # supports_gradient_checkpointing — confirm before relying on them.
    _UpperCAmelCase = RegNetConfig
    _UpperCAmelCase = '''regnet'''
    _UpperCAmelCase = '''pixel_values'''
    _UpperCAmelCase = True

    def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ) -> Tuple:
        """Initialize weights: Kaiming-normal for convs, constant (1, 0) for norms."""
        # NOTE(review): the body reads ``module`` but the parameter is named
        # ``__lowerCAmelCase`` — names were garbled; as written each branch
        # raises NameError when taken. ``nn.Convad``/``nn.BatchNormad`` are also
        # not real torch names (presumably Conv2d/BatchNorm2d — confirm).
        if isinstance(__lowerCAmelCase , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def A ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=False ) -> Tuple:
        """Toggle gradient checkpointing on matching submodules."""
        # NOTE(review): both parameters share the duplicate garbled name
        # ``__lowerCAmelCase`` (a SyntaxError as written) and the body reads the
        # undefined ``value``; upstream this is
        # ``_set_gradient_checkpointing(self, module, value=False)`` — restore
        # against the original before use.
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            a = value
A_ : Dict = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A_ : str = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
# NOTE(review): the decorator arguments below (``UpperCAmelCase__``,
# ``__lowerCAmelCase``) are garbled module-level names kept as-is; they must be
# reconciled with the real docstring constants file-wide.
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', UpperCAmelCase__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( UpperCAmelCase__ ):
    """Bare RegNet backbone: embeddings -> encoder -> global average pool."""

    def __init__( self : List[Any] , config : RegNetConfig ) -> None:
        super().__init__(config)
        # Fix: bind the real attributes read by the forward pass (the original
        # bound every submodule to a throwaway ``a``) and use the real torch
        # name ``nn.AdaptiveAvgPool2d`` (``AdaptiveAvgPoolad`` does not exist).
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : Tuple , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        """Encode ``pixel_values``; return the last hidden state and pooled output."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
# NOTE(review): decorator arguments reference garbled module-level names
# (``UpperCAmelCase__``, ``__lowerCAmelCase``); kept as-is pending a file-wide fix.
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', UpperCAmelCase__, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( UpperCAmelCase__ ):
    """RegNet backbone + linear classification head with standard HF loss selection."""

    def __init__( self : Tuple , config : Dict ) -> None:
        super().__init__(config)
        # Fix: bind the attributes the forward pass reads (the original bound
        # everything to a throwaway ``a``).
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : List[str] , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        """Classify ``pixel_values``; when ``labels`` is given, compute the loss.

        The problem type (regression / single-label / multi-label) is inferred
        once from ``num_labels`` and the label dtype, then cached on the config.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
# ---- boundary artifact: stray "| 32" tokens from file concatenation ----
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """TVLT audio feature extractor: raw waveform -> padded log-mel patches + mask.

    NOTE(review): this block is mechanically garbled — results are bound to a
    throwaway ``a`` while later code reads the intended names (e.g.
    ``self.spectrogram_length``), the ``__init__``/``__call__`` parameters all
    share the duplicate name ``__lowerCAmelCase`` (a SyntaxError as written),
    and several API names lost digits (``np.floataa`` is presumably
    ``np.float32``). Kept byte-identical; reconcile with the upstream feature
    extractor before executing.
    """

    # NOTE(review): presumably ``model_input_names`` — name garbled upstream.
    _UpperCAmelCase = ['''audio_values''', '''audio_mask''']

    def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
        """Store spectrogram geometry and precompute the slaney-scale mel filter bank."""
        super().__init__(
            feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
        # NOTE(review): each ``a = ...`` below was presumably ``self.<name> = ...``
        # for the name on the right-hand side — confirm against upstream.
        a = spectrogram_length
        a = num_channels
        a = patch_size
        a = feature_size // self.patch_size[1]
        a = n_fft
        a = sampling_rate // hop_length_to_sampling_rate
        a = sampling_rate
        a = padding_value
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T

    def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """Compute a normalized log-mel spectrogram (dB scale, rescaled into [-1, 1])."""
        a = spectrogram(
            __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
        # drop the last frame, shift by -20 dB, then rescale/clip into [-1, 1]
        a = log_spec[:, :-1]
        a = log_spec - 2_0.0
        a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
        """Featurize one or more raw waveforms into padded log-mel patches + attention mask."""
        # sanity-check the caller's sampling rate (warn when none is provided)
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # normalize the input into a batch (list) of mono float32 arrays
        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        a = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        a = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            a = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            a = np.array(__lowerCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        a = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        a = padded_audio_features * self.padding_value
        for i in range(len(__lowerCAmelCase ) ):
            a = audio_features[i]
            a = feature
        # return as BatchFeature
        if return_attention_mask:
            a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            a = {"audio_values": padded_audio_features}
        a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
        return encoded_inputs
# ---- boundary artifact: stray "| 32 / | 1" tokens from file concatenation ----
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module logger.
# NOTE(review): the module-level names are garbled — ``A_`` is reused for the
# logger and the two mappings below, yet L5671's call and the functions further
# down read ``FEATURE_EXTRACTOR_MAPPING_NAMES`` / ``FEATURE_EXTRACTOR_MAPPING``,
# which are never defined under those names here. Restore the original
# constant names before running.
A_ : str = logging.get_logger(__name__)

# Static mapping: model type -> feature-extractor class name.
A_ : Optional[int] = OrderedDict(
    [
        ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
        ('''beit''', '''BeitFeatureExtractor'''),
        ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
        ('''clap''', '''ClapFeatureExtractor'''),
        ('''clip''', '''CLIPFeatureExtractor'''),
        ('''clipseg''', '''ViTFeatureExtractor'''),
        ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
        ('''convnext''', '''ConvNextFeatureExtractor'''),
        ('''cvt''', '''ConvNextFeatureExtractor'''),
        ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
        ('''data2vec-vision''', '''BeitFeatureExtractor'''),
        ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
        ('''deit''', '''DeiTFeatureExtractor'''),
        ('''detr''', '''DetrFeatureExtractor'''),
        ('''dinat''', '''ViTFeatureExtractor'''),
        ('''donut-swin''', '''DonutFeatureExtractor'''),
        ('''dpt''', '''DPTFeatureExtractor'''),
        ('''encodec''', '''EncodecFeatureExtractor'''),
        ('''flava''', '''FlavaFeatureExtractor'''),
        ('''glpn''', '''GLPNFeatureExtractor'''),
        ('''groupvit''', '''CLIPFeatureExtractor'''),
        ('''hubert''', '''Wav2Vec2FeatureExtractor'''),
        ('''imagegpt''', '''ImageGPTFeatureExtractor'''),
        ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
        ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
        ('''levit''', '''LevitFeatureExtractor'''),
        ('''maskformer''', '''MaskFormerFeatureExtractor'''),
        ('''mctct''', '''MCTCTFeatureExtractor'''),
        ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
        ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
        ('''mobilevit''', '''MobileViTFeatureExtractor'''),
        ('''nat''', '''ViTFeatureExtractor'''),
        ('''owlvit''', '''OwlViTFeatureExtractor'''),
        ('''perceiver''', '''PerceiverFeatureExtractor'''),
        ('''poolformer''', '''PoolFormerFeatureExtractor'''),
        ('''regnet''', '''ConvNextFeatureExtractor'''),
        ('''resnet''', '''ConvNextFeatureExtractor'''),
        ('''segformer''', '''SegformerFeatureExtractor'''),
        ('''sew''', '''Wav2Vec2FeatureExtractor'''),
        ('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
        ('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
        ('''speecht5''', '''SpeechT5FeatureExtractor'''),
        ('''swiftformer''', '''ViTFeatureExtractor'''),
        ('''swin''', '''ViTFeatureExtractor'''),
        ('''swinv2''', '''ViTFeatureExtractor'''),
        ('''table-transformer''', '''DetrFeatureExtractor'''),
        ('''timesformer''', '''VideoMAEFeatureExtractor'''),
        ('''tvlt''', '''TvltFeatureExtractor'''),
        ('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
        ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
        ('''van''', '''ConvNextFeatureExtractor'''),
        ('''videomae''', '''VideoMAEFeatureExtractor'''),
        ('''vilt''', '''ViltFeatureExtractor'''),
        ('''vit''', '''ViTFeatureExtractor'''),
        ('''vit_mae''', '''ViTFeatureExtractor'''),
        ('''vit_msn''', '''ViTFeatureExtractor'''),
        ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
        ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
        ('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
        ('''whisper''', '''WhisperFeatureExtractor'''),
        ('''xclip''', '''CLIPFeatureExtractor'''),
        ('''yolos''', '''YolosFeatureExtractor'''),
    ]
)

# Lazy config-class -> feature-extractor-class mapping, resolved on demand.
A_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def UpperCAmelCase__ ( class_name :str ):
    """Resolve a feature-extractor class from its class name.

    Tries, in order: the static name->module mapping, extractors registered at
    runtime, and finally the main ``transformers`` module (where dummy objects
    live when an optional dependency is missing, yielding a helpful error).
    Returns ``None`` when nothing matches.

    Fixes: the original bound results to a throwaway ``a`` and then read names
    that were never defined (``module_name`` reassignment, the imported module,
    the fallback main module), raising NameError on every path.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f""".{module_name}""" , "transformers.models" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , "__name__" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def UpperCAmelCase__ ( pretrained_model_name_or_path :Union[str, os.PathLike] , cache_dir :Optional[Union[str, os.PathLike]] = None , force_download :bool = False , resume_download :bool = False , proxies :Optional[Dict[str, str]] = None , use_auth_token :Optional[Union[bool, str]] = None , revision :Optional[str] = None , local_files_only :bool = False , **kwargs :Dict , ):
    """Load the feature-extractor configuration dict for a checkpoint.

    Returns ``{}`` (and logs) when no feature-extractor config file exists, so
    callers can fall back to the model config.

    Fixes: the original gave every parameter the same duplicate name (a
    SyntaxError) and bound the resolved path to a throwaway ``a`` while the
    ``None`` check read the undefined ``resolved_config_file``.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file , encoding="utf-8" ) as reader:
        return json.load(reader )
class _lowercase :
    """AutoFeatureExtractor-style factory: instantiates the right feature
    extractor for a checkpoint via the classmethod below; not instantiable
    directly.

    NOTE(review): the classmethod body is mechanically garbled — results are
    bound to a throwaway ``a`` while later code reads the intended names
    (``config_dict``, ``feature_extractor_class``, ...), and several parameter
    lists use the duplicate name ``__lowerCAmelCase`` (a SyntaxError as
    written). Kept byte-identical; reconcile with the upstream
    ``AutoFeatureExtractor.from_pretrained`` before running.
    """

    def __init__( self : Optional[Any] ) -> List[str]:
        """Always raises: use the ``from_pretrained`` classmethod instead."""
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(__lowerCAmelCase )
    def A ( cls : List[str] , __lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
        """Resolve and instantiate the feature extractor for a checkpoint.

        Resolution order: explicit ``feature_extractor_type`` in the feature
        extractor config, an ``AutoFeatureExtractor`` entry in ``auto_map``
        (possibly remote code), the model config, and finally the static
        config-class mapping.
        """
        a = kwargs.pop("config" , __lowerCAmelCase )
        a = kwargs.pop("trust_remote_code" , __lowerCAmelCase )
        a = True
        # load the raw feature-extractor config dict for the checkpoint
        a , a = FeatureExtractionMixin.get_feature_extractor_dict(__lowerCAmelCase , **__lowerCAmelCase )
        a = config_dict.get("feature_extractor_type" , __lowerCAmelCase )
        a = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
            a = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                a = AutoConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
            # It could be in `config.feature_extractor_type``
            a = getattr(__lowerCAmelCase , "feature_extractor_type" , __lowerCAmelCase )
            if hasattr(__lowerCAmelCase , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
                a = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            a = feature_extractor_class_from_name(__lowerCAmelCase )
        a = feature_extractor_auto_map is not None
        a = feature_extractor_class is not None or type(__lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
        a = resolve_trust_remote_code(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        if has_remote_code and trust_remote_code:
            # dynamically fetch + register the remote feature-extractor class
            a = get_class_from_dynamic_module(
                __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
            a = kwargs.pop("code_revision" , __lowerCAmelCase )
            if os.path.isdir(__lowerCAmelCase ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(__lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
            a = FEATURE_EXTRACTOR_MAPPING[type(__lowerCAmelCase )]
            return feature_extractor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )

    @staticmethod
    def A ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] ) -> Dict:
        """Register a new (config class, feature-extractor class) pair at runtime."""
        FEATURE_EXTRACTOR_MAPPING.register(__lowerCAmelCase , __lowerCAmelCase )
# ---- boundary artifact: stray "| 32" tokens from file concatenation ----
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Model tester for MaskFormer: builds small configs/inputs and shared checks.

    NOTE(review): this block is mechanically garbled — every ``a = ...`` /
    ``a , a , ... = ...`` binding discards its value while later code reads the
    intended names (``self.parent``, ``config``, ``pixel_values``, ...), so the
    class raises NameError as written. Kept byte-identical; restore the real
    binding targets (visible on the right-hand sides and in the tuple returned
    at the end of the first prepare method) before running.
    """

    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """Store test hyper-parameters (batch size, image size, label count, ...)."""
        # NOTE(review): duplicate parameter names (SyntaxError as written);
        # upstream this is (parent, batch_size, is_training, use_auxiliary_loss,
        # num_queries, num_channels, min_size, max_size, num_labels,
        # mask_feature_size).
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size

    def A ( self : Union[str, Any] ) -> Dict:
        """Build random pixel values, pixel mask, mask labels and class labels."""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )
        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A ( self : str ) -> Any:
        """Build a tiny MaskFormer config (Swin backbone + DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def A ( self : Union[str, Any] ) -> Any:
        """Return (config, inputs_dict) with just pixel values + mask."""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """Check the lengths of encoder / pixel-decoder / transformer-decoder hidden states."""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """Run MaskFormerModel and verify output shapes / hidden states."""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """Run the instance-segmentation head with and without labels; verify logits + loss."""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()

        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )
            comm_check_on_output(__lowerCAmelCase )
            a = model(
                pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        comm_check_on_output(__lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def A ( self : List[str] ) -> None:
    """Set up the shared model tester and config tester.

    Fixes: bind the testers to the attributes the other tests read
    (``self.model_tester`` / ``self.config_tester``) instead of a throwaway
    ``a``, and replace the undefined garbled keyword values with the MaskFormer
    config class and ``has_text_modality=False`` (this is a vision model).
    """
    self.model_tester = MaskFormerModelTester(self )
    self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def A ( self : Any ) -> List[str]:
    """Run the shared config sanity tests."""
    self.config_tester.run_common_tests()
def A ( self : Optional[Any] ) -> None:
    """Check MaskFormerModel's forward pass (without collecting hidden states).

    Fix: the original discarded the prepared (config, inputs_dict) pair into a
    throwaway ``a`` and then forwarded undefined garbled names.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )
def A ( self : int ) -> None:
    """Check the instance-segmentation head model.

    Fix: the prepared inputs were bound to a throwaway ``a`` and an undefined
    garbled name was unpacked instead.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def A ( self : List[Any] ) -> Optional[Any]:
    """Intentionally skipped: MaskFormer has no inputs_embeds pathway."""
    pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def A ( self : str ) -> Union[str, Any]:
    """Intentionally skipped: no input-embeddings accessor on MaskFormer."""
    pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def A ( self : Tuple ) -> Optional[Any]:
    """Intentionally skipped: generation tests don't apply."""
    pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def A ( self : Tuple ) -> Optional[Any]:
    """Intentionally skipped: no token embeddings to resize/test."""
    pass
@require_torch_multi_gpu
@unittest.skip(
    reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A ( self : Optional[int] ) -> List[str]:
    """Intentionally skipped: DataParallel is incompatible with add_module usage."""
    pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : List[str] ) -> Any:
    """Temporarily skipped pending a smaller common-test model."""
    pass
def A ( self : Optional[Any] ) -> None:
    """Verify every model class's forward signature starts with ``pixel_values``.

    Fix: every binding was discarded into a throwaway ``a`` while subsequent
    lines read the intended names (``model``, ``signature``, ``arg_names``).
    """
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
def A ( self : Tuple ) -> List[Any]:
    """Pretrained checkpoints load successfully from the hub."""
    for model_name in ["facebook/maskformer-swin-small-coco"]:
        a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
        self.assertIsNotNone(__lowerCAmelCase )
def A ( self : str ) -> Dict:
    """A forward pass with random mask/class labels produces a loss."""
    a = (self.model_tester.min_size,) * 2
    a = {
        "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
        "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
        "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
    }
    a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
    a = model(**__lowerCAmelCase )
    self.assertTrue(outputs.loss is not None )
def A ( self : Union[str, Any] ) -> List[Any]:
    """Same model smoke test as above, run again with output_hidden_states."""
    a , a = self.model_tester.prepare_config_and_inputs_for_common()
    self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def A ( self : List[str] ) -> Any:
    """Forward with output_attentions returns attention tensors for every class."""
    a , a = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
        self.assertTrue(outputs.attentions is not None )
def A ( self : Optional[Any] ) -> Union[str, Any]:
    """Training-mode forward/backward through the instance-segmentation loss."""
    if not self.model_tester.is_training:
        return
    # only MaskFormerForInstanceSegmentation has the loss
    a = self.all_model_classes[1]
    a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
    a = model_class(__lowerCAmelCase )
    model.to(__lowerCAmelCase )
    model.train()
    a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
    loss.backward()
def A ( self : List[str] ) -> Union[str, Any]:
    """Gradients flow back to every intermediate output (retain_grad checks)."""
    a = self.all_model_classes[1]
    a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
    a = True
    a = True
    a = model_class(__lowerCAmelCase )
    model.to(__lowerCAmelCase )
    model.train()
    a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
    a = outputs.encoder_hidden_states[0]
    encoder_hidden_states.retain_grad()
    a = outputs.pixel_decoder_hidden_states[0]
    pixel_decoder_hidden_states.retain_grad()
    # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
    a = outputs.transformer_decoder_hidden_states[0]
    transformer_decoder_hidden_states.retain_grad()
    a = outputs.attentions[0]
    attentions.retain_grad()
    outputs.loss.backward(retain_graph=__lowerCAmelCase )
    self.assertIsNotNone(encoder_hidden_states.grad )
    self.assertIsNotNone(pixel_decoder_hidden_states.grad )
    self.assertIsNotNone(transformer_decoder_hidden_states.grad )
    self.assertIsNotNone(attentions.grad )
# Absolute tolerance shared by the slow integration tests below.
A_ : int = 1E-4
def UpperCAmelCase__ ( ):
    """Load the COCO test image from the local fixtures directory.

    Returns:
        A PIL image object for ./tests/fixtures/tests_samples/COCO/000000039769.png.
    """
    # Bug fix: the opened image was bound to a throwaway local while the return
    # statement referenced an undefined name (`image`).
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests that run real MaskFormer checkpoints on a COCO image.

    NOTE(review): local names in these bodies appear mangled (everything is bound
    to `a` while later lines read the original names such as `model`, `inputs`,
    `outputs`) — restore the bindings before running.
    """

    @cached_property
    def A ( self : int ) -> Optional[int]:
        """Image processor for the swin-small COCO checkpoint, or None without vision deps."""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def A ( self : List[Any] ) -> Optional[Any]:
        """Compare encoder / pixel-decoder / transformer-decoder activations to reference slices."""
        a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        a = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : str ) -> Union[str, Any]:
        """Check mask and class query logits of the swin-small instance-segmentation head."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : List[Any] ) -> Any:
        """Same logits checks for the resnet101 COCO-stuff checkpoint."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : int ) -> Any:
        """Verify a loss is computed when segmentation maps are supplied."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 32
| 1
|
from __future__ import annotations
def UpperCAmelCase__ ( input_list: list , low: int , mid: int , high: int ) -> list:
    """Two-way merge of the sorted runs input_list[low:mid] and
    input_list[mid:high + 1], written back into input_list in place.

    Args:
        input_list: list containing the two adjacent sorted runs.
        low: start index of the left run (inclusive).
        mid: start index of the right run (left run is [low, mid)).
        high: end index of the right run (inclusive).

    Returns:
        The (mutated) input_list, for call-chaining convenience.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Repeatedly take the smaller head element; pop(0) keeps each run aligned.
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Bug fix: the merged run must be slice-assigned back into the original
    # list; previously it was dropped into a throwaway local, so the merge
    # had no effect.
    input_list[low : high + 1] = result + left + right
    return input_list
def UpperCAmelCase__ ( input_list: list ) -> list:
    """Bottom-up (iterative) merge sort.

    Returns a sorted copy of the input; the argument itself is not modified
    (except for the trivial len <= 1 case, where it is returned as-is).
    """

    def _merge(lst: list, low: int, mid: int, high: int) -> list:
        # Merge the adjacent sorted runs lst[low:mid] and lst[mid:high + 1]
        # in place. Bug fix: this helper was previously an undefined name
        # (`merge`) and dropped its result instead of slice-assigning it back.
        result = []
        left, right = lst[low:mid], lst[mid : high + 1]
        while left and right:
            result.append((left if left[0] <= right[0] else right).pop(0))
        lst[low : high + 1] = result + left + right
        return lst

    if len(input_list) <= 1:
        return input_list
    sorted_list = list(input_list)
    # Iteration for two-way merging: run width p doubles every pass, merging
    # adjacent runs of width p // 2.
    p = 2
    while p <= len(sorted_list):
        # Merge each pair of adjacent width-(p//2) runs.
        for i in range(0, len(sorted_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            sorted_list = _merge(sorted_list, low, mid, high)
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(sorted_list):
            mid = i
            sorted_list = _merge(sorted_list, 0, mid, len(sorted_list) - 1)
            break
        p *= 2
    return sorted_list
if __name__ == "__main__":
    # Simple CLI driver: read comma-separated integers, sort, and print.
    # NOTE(review): names appear mangled — `A_` is assigned but `user_input`
    # and `unsorted` are read, and `iter_merge_sort` is not defined under that
    # name above; restore consistent names before running.
    A_ : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        A_ : Dict = []
    else:
        A_ : int = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Unit tests for DisjunctiveConstraint validation and stepping behaviour.

    NOTE(review): locals appear mangled (`a` / `a , a , a`) relative to the
    names read afterwards (`dc`, `stepped`, `completed`, `reset`) — restore
    the bindings before running.
    """

    def A ( self : Union[str, Any] ) -> int:
        """Nested-list input is accepted; tensor / tensor-list inputs raise."""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def A ( self : Tuple ) -> Dict:
        """A branch that is a strict prefix of another branch is rejected."""
        a = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase ) # fails here

    def A ( self : int ) -> Any:
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def A ( self : List[Any] ) -> List[Any]:
        """Completion via the longer branch, then reset() and re-stepping."""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 32
| 1
|
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class _lowercase ( unittest.TestCase ):
    """Minimal tester helper for the MarkupLM feature-extraction tests.

    Records the parent test case and exposes the (empty) feature-extractor
    init kwargs.
    """

    def __init__( self : List[str] , __lowerCAmelCase : Optional[int] ) -> int:
        """Store the parent test case for later use.

        Bug fix: the argument was previously bound to a throwaway local, so
        ``self.parent`` was never set.
        """
        self.parent = __lowerCAmelCase

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Return the kwargs used to construct the feature extractor (none needed)."""
        return {}
def UpperCAmelCase__ ( ):
    """Return two sample HTML documents used as feature-extraction fixtures.

    Bug fix: both strings were previously bound to the same throwaway local
    and the return statement referenced undefined names.
    """
    html_string_a = "<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR=\"FFFFFF\">\n    <HR>\n    <a href=\"http://google.com\">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style=\"color:#0000FF\">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>"
    html_string_b = "\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    "
    return [html_string_a, html_string_b]
@require_bsa
class _lowercase ( UpperCAmelCase__, unittest.TestCase ):
    """Tests MarkupLMFeatureExtractor's node / xpath extraction on sample HTML.

    NOTE(review): locals appear mangled (`a`) relative to the names read later
    (`feature_extractor`, `encoding`, `expected_nodes`, ...) — restore before running.
    """

    # Class under test; None when beautifulsoup is unavailable.
    _UpperCAmelCase = MarkupLMFeatureExtractor if is_bsa_available() else None

    def A ( self : int ) -> List[Any]:
        """Instantiate the lightweight tester helper."""
        a = MarkupLMFeatureExtractionTester(self )

    @property
    def A ( self : Union[str, Any] ) -> int:
        """Init kwargs for the feature extractor under test."""
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def A ( self : Tuple ) -> Optional[int]:
        """Single and batched HTML inputs yield the expected nodes and xpaths."""
        a = self.feature_extraction_class()
        # Test not batched input
        a = get_html_strings()[0]
        a = feature_extractor(__lowerCAmelCase )
        # fmt: off
        a = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        a = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes , __lowerCAmelCase )
        self.assertEqual(encoding.xpaths , __lowerCAmelCase )
        # Test batched
        a = get_html_strings()
        a = feature_extractor(__lowerCAmelCase )
        # fmt: off
        a = expected_nodes + [["My First Heading", "My first paragraph."]]
        a = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , __lowerCAmelCase )
        self.assertEqual(encoding.xpaths , __lowerCAmelCase )
| 32
|
from __future__ import annotations
def UpperCAmelCase__ ( n: int ) -> bool:
    """Return True if `n` uses each of the digits 1-9 exactly once.

    Bug fix: the original converted the int to a string into a throwaway
    local but then applied len()/set() to the int itself (and declared a
    duplicate parameter name, which is a SyntaxError).
    """
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789" )
def UpperCAmelCase__ ( ):
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Searches 4-digit bases first (concat(n, 2n) == n * 100002 for
    5000 <= n <= 9999), then 3-digit bases (concat(n, 2n, 3n) == n * 1002003
    for 100 <= n <= 333). Returns the candidate, or None if none is found.

    Bug fix: candidates were bound to a throwaway local and the pandigital
    check referenced an undefined name; a private helper is used instead.
    """

    def _is_9_pandigital(candidate: int) -> bool:
        # True when the candidate uses each digit 1-9 exactly once.
        digits = str(candidate)
        return len(digits) == 9 and set(digits) == set("123456789" )

    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if _is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
    # Print the Project Euler answer when executed as a script.
    # NOTE(review): `solution` is not defined under that name above — the
    # solver function appears renamed; confirm before running.
    print(F"""{solution() = }""")
| 32
| 1
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( torch_layer , weight , bias=None ):
    """Copy `weight` (and optionally `bias`) into `torch_layer` after shape checks.

    Args:
        torch_layer: torch module exposing .weight (and .bias when bias given).
        weight: tensor with the same shape as torch_layer.weight.
        bias: optional tensor with the same shape as torch_layer.bias.

    Bug fix: the new nn.Parameter was previously dropped into a throwaway
    local instead of being assigned back onto the layer (and the original
    signature declared duplicate parameter names — a SyntaxError).
    """
    assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int ):
    """Load trax LSH self-attention weights (query_key, value, output dense)
    into the given torch attention layer.

    NOTE(review): parameter names are mangled (duplicated, a SyntaxError) and
    the body reads `weights` / `torch_layer` — restore the original signature
    (weights, torch_layer, hidden_size) before use; `set_param` is the shape-
    checked setter defined above.
    """
    a = np.asarray(weights[0] )
    a = np.asarray(weights[1] )
    a = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Any , UpperCAmelCase__ :Tuple ):
    """Load trax local self-attention weights (separate query, key, value plus
    output dense) into the given torch attention layer.

    NOTE(review): same mangled signature as the LSH variant above — restore
    (weights, torch_layer, hidden_size) before use.
    """
    a = np.asarray(weights[0] )
    a = np.asarray(weights[1] )
    a = np.asarray(weights[2] )
    a = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCAmelCase__ ).view(-1 , UpperCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Optional[Any] ):
    """Load one trax Reformer block (attention layer-norm, LSH or local
    attention weights, chunked feed-forward) into the corresponding torch block.

    A 3-element attention weight tuple selects the LSH loader, 4 elements the
    local-attention loader.
    """
    a = weights[0][0][0]
    a = np.asarray(layer_norm_a[0] )
    a = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , )
    # lsh weights + output
    a = weights[0][1]
    if len(UpperCAmelCase__ ) < 4:
        set_layer_weights_in_torch_lsh(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ )
    else:
        set_layer_weights_in_torch_local(UpperCAmelCase__ , torch_block.attention , UpperCAmelCase__ )
    # intermediate weighs
    a = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(UpperCAmelCase__ ) == 4:
        a = intermediate_weights[2]
    # layernorm 2
    a = np.asarray(intermediate_weights[0][0] )
    a = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , )
    # intermediate dense
    a = np.asarray(intermediate_weights[1][0] )
    a = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , )
    # intermediate out
    a = np.asarray(intermediate_weights[4][0] )
    a = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :int ):
    """Load all trax weights (word/position embeddings, every encoder block,
    final layer-norm, LM head) into a torch ReformerModelWithLMHead."""
    a = torch_model.reformer
    # word embeds
    a = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCAmelCase__ ) , )
    if isinstance(weights[3] , UpperCAmelCase__ ):
        a = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            a = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"""{position_embeddings[emb_idx]} emb does not match"""
            a = nn.Parameter(torch.tensor(UpperCAmelCase__ ) )
    a = weights[5]
    # trax stores 4 weight groups per Reformer layer.
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        UpperCAmelCase__ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # output layer norm
    a = np.asarray(weights[7][0] )
    a = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) , )
    # output embeddings
    a = np.asarray(weights[9][0] )
    a = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(UpperCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase__ ) , )
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Dict ):
    """Build a ReformerModelWithLMHead from a config JSON, load the trax
    pickle weights into it, and save the resulting state dict to disk."""
    a = ReformerConfig.from_json_file(UpperCAmelCase__ )
    print(F"""Building PyTorch model from configuration: {config}""" )
    a = ReformerModelWithLMHead(UpperCAmelCase__ )
    with open(UpperCAmelCase__ , "rb" ) as f:
        a = pickle.load(UpperCAmelCase__ )["weights"]
    set_model_weights_in_torch(UpperCAmelCase__ , UpperCAmelCase__ , config.hidden_size )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , UpperCAmelCase__ )
if __name__ == "__main__":
    # CLI entry point: --trax_model_pkl_path --config_file --pytorch_dump_path.
    # NOTE(review): names appear mangled — `A_` is assigned but `parser` / `args`
    # are read; restore consistent names before running.
    A_ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained Reformer model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    A_ : Optional[Any] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 32
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Slow round-trip tests for the Tatoeba -> PyTorch Marian converter.

    NOTE(review): the skipUnless guard references `UpperCAmelCase__` where a
    path constant (e.g. DEFAULT_REPO, imported above) is expected — confirm.
    """

    @cached_property
    def A ( self : List[str] ) -> int:
        """Build a converter writing into a fresh temporary directory."""
        a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__lowerCAmelCase )

    @slow
    def A ( self : Optional[int] ) -> List[str]:
        """Conversion of the heb-eng pair completes without error."""
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def A ( self : Dict ) -> Any:
        """Model-card metadata records the expected language pair."""
        a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase )
        assert mmeta["long_pair"] == "heb-eng"
| 32
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCAmelCase__ ( ):
    """Download the demo image used for sanity-checking the converted model.

    Bug fix: the URL was bound to a throwaway local and the request was passed
    the wrong object; `stream=True` keeps the body available as a raw stream.
    """
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] ):
    """Return (old, new) key pairs mapping LAVIS checkpoint names to HF names
    for the vision encoder and the Q-Former embeddings.

    NOTE(review): `a = []` binds a throwaway local while the appends target
    `rename_keys` — restore the binding before use.
    """
    a = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
    for i in range(config.vision_config.num_hidden_layers ):
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
        rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
        rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
    # fmt: on
    return rename_keys
def UpperCAmelCase__ ( dct , old , new ):
    """Move dct[old] to dct[new] (state-dict key renaming helper).

    Bug fix: the popped value was previously discarded instead of being
    re-inserted under the new key (and the original signature declared
    duplicate parameter names — a SyntaxError).
    """
    val = dct.pop(old )
    dct[new] = val
def UpperCAmelCase__ ( state_dict , config ):
    """Fuse separate q/v biases into a single qkv bias per ViT block.

    The LAVIS checkpoint stores separate q and v biases (k has none); the HF
    model expects one concatenated qkv bias, with zeros in the k slot.

    Bug fix: the fused bias was previously dropped into a throwaway local and
    never written back into the state dict (and the original signature
    declared duplicate parameter names — a SyntaxError).
    """
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict: [q | zeros (k) | v]
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def UpperCAmelCase__ ( UpperCAmelCase__ :List[Any] ):
    """Assemble an InstructBlipConfig (vision + text + Q-Former) for the given
    model name and return (config, image_size).

    NOTE(review): locals appear mangled (`a`) relative to the names read on the
    return line (`config`, `image_size`) — restore before use.
    """
    a = 3_64 if "coco" in model_name else 2_24
    a = InstructBlipVisionConfig(image_size=UpperCAmelCase__ ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        a = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        a = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "vicuna-7b" in model_name:
        a = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=3_20_01 ).to_dict()
    elif "vicuna-13b" in model_name:
        a = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=3_20_01 ).to_dict()
    else:
        raise ValueError("Model name not supported" )
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    a = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
    a = InstructBlipConfig(vision_config=UpperCAmelCase__ , text_config=UpperCAmelCase__ , qformer_config=UpperCAmelCase__ )
    return config, image_size
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Union[str, Any]=None , UpperCAmelCase__ :List[str]=False ):
    """Convert a LAVIS InstructBLIP checkpoint to HF format, verify logits and
    generation against the original model, then optionally save / push to hub.

    NOTE(review): locals appear mangled (`a`) relative to the names read later
    (`model_name`, `hf_model`, `state_dict`, `processor`, ...) — restore the
    bindings before running.
    """
    a = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" )
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
    if "t5" in model_name:
        a = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" )
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        a = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
    a , a = get_blipa_config(UpperCAmelCase__ )
    a = InstructBlipForConditionalGeneration(UpperCAmelCase__ ).eval()
    a = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    a , a = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    a = "cuda:1" if torch.cuda.is_available() else "cpu"
    a = "cuda:2" if torch.cuda.is_available() else "cpu"
    a , a , a = load_model_and_preprocess(
        name=UpperCAmelCase__ , model_type=UpperCAmelCase__ , is_eval=UpperCAmelCase__ , device=UpperCAmelCase__ )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    a = original_model.state_dict()
    a = create_rename_keys(UpperCAmelCase__ )
    for src, dest in rename_keys:
        rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        a = state_dict.pop(UpperCAmelCase__ )
        if key.startswith("Qformer.bert" ):
            a = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            a = key.replace("self" , "attention" )
        if "llm_proj" in key:
            a = key.replace("llm_proj" , "language_projection" )
        if "t5_proj" in key:
            a = key.replace("t5_proj" , "language_projection" )
        if key.startswith("llm_model" ):
            a = key.replace("llm_model" , "language_model" )
        if key.startswith("t5" ):
            a = key.replace("t5" , "language" )
        a = val
    # read in qv biases
    read_in_q_v_bias(UpperCAmelCase__ , UpperCAmelCase__ )
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
    a = load_demo_image()
    a = "What is unusual about this image?"
    # create processor
    a = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ )
    a = InstructBlipProcessor(
        image_processor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , qformer_tokenizer=UpperCAmelCase__ , )
    a = processor(images=UpperCAmelCase__ , text=UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
    # make sure processor creates exact same pixel values
    a = vis_processors["eval"](UpperCAmelCase__ ).unsqueeze(0 ).to(UpperCAmelCase__ )
    a = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCAmelCase__ )
    original_model.to(UpperCAmelCase__ )
    hf_model.to(UpperCAmelCase__ )
    with torch.no_grad():
        if "vicuna" in model_name:
            a = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
            a = hf_model(**UpperCAmelCase__ ).logits
        else:
            a = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
            a = tokenizer("\n" , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ )
            a = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
            a = hf_model(**UpperCAmelCase__ , labels=UpperCAmelCase__ ).logits
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    assert original_logits.shape == logits.shape
    a = 1E-4 if "vicuna" in model_name else 1E-5
    assert torch.allclose(original_logits.to(logits.device ) , UpperCAmelCase__ , atol=UpperCAmelCase__ )
    print("Looks ok!" )
    print("Generating with original model..." )
    a = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 )
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model..." )
    a = hf_model.generate(
        **UpperCAmelCase__ , do_sample=UpperCAmelCase__ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        a = 2
    print("Original generation:" , UpperCAmelCase__ )
    a = processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
    a = [text.strip() for text in output_text]
    print("HF generation:" , UpperCAmelCase__ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(UpperCAmelCase__ )
        hf_model.save_pretrained(UpperCAmelCase__ )
    if push_to_hub:
        processor.push_to_hub(F"""Salesforce/{model_name}""" )
        hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
    # CLI entry point: pick a model name, optionally dump locally and/or push.
    # NOTE(review): names appear mangled — `A_` is assigned but `choices` /
    # `parser` / `args` are read; restore consistent names before running.
    A_ : List[str] = argparse.ArgumentParser()
    A_ : List[str] = [
        '''instructblip-vicuna-7b''',
        '''instructblip-vicuna-13b''',
        '''instructblip-flan-t5-xl''',
        '''instructblip-flan-t5-xxl''',
    ]
    parser.add_argument(
        '''--model_name''',
        default='''instructblip-flan-t5-xl''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    A_ : List[str] = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _lowercase(PretrainedConfig):
    """Configuration for a LiLT (Language-independent Layout Transformer) model.

    Fixes vs. the broken original: the ``__init__`` signature declared every
    parameter as ``__lowerCAmelCase`` (duplicate argument names -> SyntaxError)
    while the body referenced the real names; the base class ``UpperCAmelCase__``
    was undefined (the file imports ``PretrainedConfig``).
    Parameter names/defaults reconstructed from the body's references and the
    original default values.
    """

    # Identifier used by the auto classes to match config <-> model.
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        # pad_token_id is consumed by the base config; remaining kwargs forwarded.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 32
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase(DiffusionPipeline):
    """Unconditional image-generation pipeline using score-based generative
    modeling through SDEs (ScoreSdeVe).

    Fixes vs. the broken original: duplicate ``__lowerCAmelCase`` parameter
    names (SyntaxError), bodies referencing undefined names
    (``batch_size``, ``sample``, ...), and the undefined base class
    ``UpperCAmelCase__`` (the file imports ``DiffusionPipeline``).
    """

    # Components registered via register_modules.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the sampling loop and return generated images."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled by the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse SDE)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the last step as the final image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 32
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model directory.

    Fixes vs. the broken original: all three parameters were named
    ``UpperCAmelCase__`` (duplicate argument names -> SyntaxError), the
    f-strings referenced undefined ``config``/``pytorch_dump_path``, and the
    function name did not match its call site (``convert_tf_checkpoint_to_pytorch``).
    """
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the obfuscated version assigned the parser to `A_`, leaving `parser`
    # and `args` undefined (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
| 1
|
# Code cell prepended to every auto-generated documentation notebook.
# Fix: the obfuscated version assigned this to `A_` but referenced
# `INSTALL_CONTENT` below (NameError).
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells injected into generated notebooks.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder substitutions applied before running `black` on doc code samples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 32
|
def UpperCAmelCase__(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string
    (e.g. ``"0b010010"``), zero-padded to the width of the longer operand.

    Fixes vs. the broken original: both parameters were named
    ``UpperCAmelCase__`` (duplicate argument names -> SyntaxError) and the body
    referenced undefined ``a_binary``/``b_binary``.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Per-character AND over the two zero-padded binary strings.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a *sorted* list of ints, return indices ``[i, j]`` of two values
    summing to ``target`` using the two-pointer technique, or ``[]`` if none.

    Fixes vs. the broken original: duplicate ``UpperCAmelCase__`` parameters
    (SyntaxError), undefined ``nums``/``target`` in the body, the pointer
    updates assigned to a dead variable ``a`` (infinite loop), and the
    function name did not match its use in the ``__main__`` print below.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        current = nums[i] + nums[j]
        if current == target:
            return [i, j]
        if current < target:
            i += 1  # need a larger sum: advance the left pointer
        else:
            j -= 1  # need a smaller sum: retreat the right pointer
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 32
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single node of the sorted singly linked list.

    Fix: the obfuscated version replaced both fields with
    ``_UpperCAmelCase = 42`` (no annotations), so the dataclass generated no
    fields and ``Node(data, next_node)`` raised TypeError.
    """

    data: int
    next_node: "Node | None"


class SortedLinkedList:
    """Singly linked list that keeps its integers in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head = None
        # Insert in descending order at the head, so the list ends up ascending.
        # (The broken original passed the iterable itself as `reverse=`.)
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 32
| 1
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : int = logging.get_logger(__name__)
A_ : List[Any] = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
A_ : str = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
A_ : List[Any] = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
A_ : Optional[Any] = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
A_ : List[str] = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
A_ : int = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
A_ : str = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
A_ : int = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
A_ : Dict = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
A_ : Optional[Any] = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
A_ : Any = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto classes: each selects the right Flax model class from a config via its
# `_model_mapping`. Fixes vs. the broken original: every class was named
# `_lowercase` and the wrapped result assigned to `A_`, while
# `auto_class_update(FlaxAutoModel)` etc. referenced the real names (NameError);
# the mapping attribute `_model_mapping` expected by _BaseAutoModelClass had
# been renamed to `_UpperCAmelCase`.


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 32
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# UnCLIP requires torch and transformers >= 4.25.0. When the requirement is not
# met, export dummy objects instead; these raise an informative error on use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder classes with the same public names.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :float ):
'''simple docstring'''
return np.where(vector > 0 , UpperCAmelCase__ , (alpha * (np.exp(UpperCAmelCase__ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

A_ : int = logging.get_logger(__name__)
A_ : str = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}


class _lowercase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model (usable as a backbone).

    Fixes vs. the broken original: every ``__init__`` parameter was named
    ``__lowerCAmelCase`` (duplicate argument names -> SyntaxError) while the
    body referenced the real names, and both base classes were the undefined
    ``UpperCAmelCase__`` (a duplicate-base error even if defined); the imports
    above provide ``BackboneConfigMixin`` and ``PretrainedConfig``.
    """

    # Identifier used by the auto classes to match config <-> model.
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # noqa: B006 — kept for interface fidelity; never mutated
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, preceded by the stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Validate/align the requested backbone outputs against the stage names.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 32
| 1
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model directory.

    Fixes vs. the broken original: all three parameters were named
    ``UpperCAmelCase__`` (duplicate argument names -> SyntaxError), the
    f-strings referenced undefined ``config``/``pytorch_dump_path``, and the
    function name did not match its call site (``convert_tf_checkpoint_to_pytorch``).
    """
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the obfuscated version assigned the parser to `A_`, leaving `parser`
    # and `args` undefined (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
|
def UpperCAmelCase__(head):
    """Return True if the singly linked list starting at ``head`` is a
    palindrome, using O(1) extra space (reverse-second-half technique).

    Fixes vs. the broken original: the tuple unpackings were collapsed to
    ``a , a = ...`` so ``fast``/``slow``/``second``/``node``/``nxt`` were
    undefined. Nodes are expected to expose ``.val`` and ``.next``.
    """
    if not head:
        return True
    # 1. Find the middle: slow ends at the last node of the first half.
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # 2. Reverse the second part.
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # 3. Compare the two halves (second part has the same or one less node).
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def UpperCAmelCase__(head):
    """Return True if the linked list at ``head`` is a palindrome, using a
    stack of the second half's values.

    Fixes vs. the broken original: ``a = a = a = head`` destroyed the
    ``slow``/``fast``/``cur`` initialization and ``stack`` was undefined.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow).
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack.
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Compare popped (reversed second half) against the front.
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def UpperCAmelCase__(head):
    """Return True if the linked list at ``head`` is a palindrome, by mapping
    each value to its list of positions: in a palindrome the positions of any
    value pair up symmetrically around the center (``i + mirror == n - 1``),
    and at most one value may appear an odd number of times.

    Fixes vs. the broken original: the dict ``d`` and the counters
    ``pos``/``checksum``/``middle``/``step`` were all collapsed to ``a``.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    # Record every position at which each value occurs.
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # i and its mirror must sum to last index
    middle = 0  # number of values with an odd occurrence count
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 32
| 1
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    # Fix: the obfuscated version assigned parser/args/cluster/example_dir to
    # `A_`, leaving every later reference undefined (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 32
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = embedding_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def A ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : int ) -> List[str]:
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def create_and_check_mobilebert_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run the bare MobileBertModel with/without masks and check output shapes.

    Bug fixed: the model/result locals were bound to a throwaway name and then
    referenced by their intended names (NameError).
    """
    model = MobileBertModel(config=config)
    model.to(torch_device)  # NOTE(review): torch_device presumably comes from transformers.testing_utils — confirm import
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_mobilebert_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the masked-LM head: logits must be (batch, seq, vocab)."""
    model = MobileBertForMaskedLM(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_mobilebert_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the NSP head: logits must be (batch, 2)."""
    model = MobileBertForNextSentencePrediction(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the joint MLM + NSP pre-training heads."""
    model = MobileBertForPreTraining(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        next_sentence_label=sequence_labels,
    )
    self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the QA head: start/end logits must be (batch, seq)."""
    model = MobileBertForQuestionAnswering(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_mobilebert_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the sequence-classification head: logits must be (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = MobileBertForSequenceClassification(config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_mobilebert_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the token-classification head: logits must be (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = MobileBertForTokenClassification(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_mobilebert_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the multiple-choice head: inputs are tiled to (batch, num_choices, seq)."""
    config.num_choices = self.num_choices
    model = MobileBertForMultipleChoice(config=config)
    model.to(torch_device)  # NOTE(review): torch_device from transformers.testing_utils — confirm import
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) as expected by the common model tests.

    Bug fixed: the tuple was unpacked into seven copies of the same
    throwaway name, so ``input_ids`` etc. were undefined when building
    ``inputs_dict``.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class _lowercase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the MobileBERT model family.

    NOTE(review): the two mixin bases were an unresolved duplicated
    placeholder (duplicate base class is a TypeError); restored to the
    standard transformers test mixins — confirm against this file's imports.
    Method names are restored to unittest conventions (``setUp``/``test_*``)
    because the placeholder name ``A`` made every method shadow the previous
    one and defeated test discovery.
    """

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it needs both label tensors
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # NOTE(review): mapping name restored from upstream — confirm it is imported in this file
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst, device=None):
    """Convert a (nested) list of token ids into a ``torch.long`` tensor.

    Bugs fixed: the function was named inconsistently with its call site
    (which uses ``_long_tensor``), and it passed *itself* as the ``device``
    argument. ``device`` is now an optional parameter (default: current
    default device) for backward-compatible generality.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=device)
A_ : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase(unittest.TestCase):
    """Slow integration test against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        # NOTE(review): torch_device presumably comes from transformers.testing_utils — confirm import
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
                    [-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
                    [2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 32
| 1
|
class _lowercase:
    """A capacitated flow network over an adjacency-matrix ``graph``.

    Multiple sources/sinks are collapsed into a single fake source/sink by
    ``_normalize_graph``. Bugs fixed: instance attributes were never assigned
    (everything went to a throwaway local), ``sources is int`` was always
    False (now ``isinstance``), and the method names are restored to the ones
    the rest of this file calls (``_normalize_graph``, ``find_maximum_flow``,
    ``set_maximum_flow_algorithm``).
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Record source/sink indices, adding a fake super-source/super-sink if needed."""
        # accept a bare vertex index as well as a list of indices
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Execute the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        """Instantiate ``algorithm`` (an executor class) over this network."""
        self.maximum_flow_algorithm = algorithm(self)
class _lowercase:
    """Base class for algorithms that run over a flow network.

    Bug fixed: the constructor never assigned the instance attributes that
    ``execute`` and the subclasses read; methods are renamed to the names
    the rest of this file calls (``execute`` / ``_algorithm``).
    """

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """Run the algorithm exactly once."""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        """Override in subclasses with the actual algorithm."""
        pass
class _lowercase(_lowercase):  # inherits from the executor base class defined just above
    """Executor that produces a single maximum-flow value.

    Bugs fixed: the base class was an unresolved placeholder (now the
    previously bound ``_lowercase`` base executor), ``self.maximum_flow``
    was never assigned, and the accessor is renamed to ``getMaximumFlow``
    — the name the FlowNetwork class calls.
    """

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        """Return the computed flow; raises if ``execute`` has not run yet."""
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class _lowercase(_lowercase):  # inherits from the maximum-flow executor defined just above
    """Push-relabel (relabel-to-front) maximum-flow algorithm.

    Bugs fixed: ``preflow``/``heights``/``excesses`` were never assigned, and
    helper methods are renamed to the names the code itself calls
    (``process_vertex``, ``push``, ``relabel``).
    """

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        # the source sits at the maximum height
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge ``vertex_index``: push to admissible neighbours, then relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as the residual capacity allows."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Raise the vertex just above its lowest admissible neighbour."""
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # NOTE(review): FlowNetwork / PushRelabelExecutor correspond to the classes
    # (mis)named `_lowercase` above in this file — confirm the intended names.
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
| 32
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase(TestCase):
    """Static lint checks over the dataset scripts under ./datasets.

    Bugs fixed: the base class was an unresolved placeholder (now the
    ``TestCase`` imported in this file), helper results were bound to a
    throwaway local, and method names are restored so unittest discovers the
    ``test_*`` methods and the helpers they call resolve.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find any `open(...)` call without an explicit encoding or binary mode."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find any real `print(` call (ignoring ones in comments/strings/docstrings)."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 32
| 1
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase:
    """Constant contrast stretch of a grayscale image via its cumulative histogram.

    Bugs fixed: none of the instance attributes were ever assigned, loop
    locals were collapsed into one throwaway name, and the methods are
    renamed to the names the __main__ block calls (``stretch``,
    ``plot_histogram``, ``show_image``).
    """

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256  # number of gray levels
        self.sk = 0  # running cumulative probability
        self.k = 0  # total pixel count from the histogram
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Read ``input_image`` (grayscale), remap its levels, write output_data/output.jpg."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            # NOTE(review): reconstructed rounding step — confirm against the original algorithm
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): the class above is bound to the name `_lowercase` in this
    # file (the original `ConstantStretch` name is unresolved), so use it.
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = _lowercase()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 32
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module boilerplate for the InstructBLIP subpackage.
# Bugs fixed: the structure dict was misnamed `A_` while `_LazyModule` below
# references `_import_structure`, the torch-only list did not extend the dict,
# and the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
A_ : Optional[Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export a PyTorch module to ONNX at ``output_path``.

    Bugs fixed: the function was renamed to the name its call site uses, and
    every argument of ``mkdir``/``export`` was the function object itself;
    the intended parameter names/values are restored.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    Bugs fixed: restored `float16`/`float32` dtypes (both branches picked the
    same garbled name), bound the decode method to ``forward``, and renamed
    the function/locals to the names the __main__ block uses.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    print(args.output_path)
    # argparse stores `--fp16` as `args.fp16` (the old `args.fpaa` was an AttributeError)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase(SchedulerCommonTest):
    """Tests for ``UniPCMultistepScheduler``.

    Bugs fixed throughout: the base class was an unresolved placeholder (now
    the imported ``SchedulerCommonTest``), the two class attributes shared one
    name (the second shadowed the first) while the code reads
    ``scheduler_classes``/``forward_default_kwargs``, every local was bound to
    a throwaway name, all methods were named ``A`` (mutual shadowing, no
    unittest discovery), and ``full_loop`` discarded a caller-supplied
    scheduler by unconditionally rebuilding it.
    """

    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload a scheduler built with ``config`` and check identical steps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload a default scheduler and check a single identical step."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        if scheduler is None:
            # only build a default scheduler when the caller did not pass one
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 32
| 1
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
A_ : str = logging.getLogger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """Distributed PyTorch retriever (apparently a ``RagRetriever`` subclass).

    Rank 0 hosts the index; queries from all workers are gathered to rank 0,
    retrieved there, and the results scattered back over a dedicated "gloo"
    process group.

    NOTE(review): this file looks machine-mangled.  Every method is named
    ``A`` (later definitions shadow earlier ones, so calls such as
    ``self._is_main()`` cannot resolve), ``__init__`` declares four
    parameters that all share the name ``__lowerCAmelCase`` (duplicate
    argument names are a SyntaxError), and most assignments bind a throwaway
    ``a`` instead of the attribute/variable later code reads (e.g.
    ``self.process_group``).  The docs below describe the apparent intent;
    confirm against the original source before relying on them.
    """

    def __init__( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=None ) -> Optional[int]:
        """Forward config/tokenizers/index to the base class; defer retrieval init."""
        super().__init__(
            __lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
        # NOTE(review): presumably ``self.process_group = None`` originally.
        a = None

    def A ( self : Optional[int] , __lowerCAmelCase : int ) -> Union[str, Any]:
        """Create the side "gloo" group and build the index on the main worker."""
        logger.info("initializing retrieval" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized" )
            # needs to be set manually
            # NOTE(review): result discarded; likely meant to set an env var
            # (e.g. GLOO_SOCKET_IFNAME) — confirm.
            a = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            # NOTE(review): ``distributed_port`` is undefined in this scope
            # (the parameter is ``__lowerCAmelCase``) — confirm original name.
            a = str(distributed_port + 1 )
            a = dist.new_group(ranks=__lowerCAmelCase , backend="gloo" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main" )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def A ( self : List[Any] ) -> Union[str, Any]:
        """True iff this process is rank 0 of the retrieval process group."""
        return dist.get_rank(group=self.process_group ) == 0

    def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=torch.floataa ) -> Dict:
        """Scatter chunks from rank 0 to all workers; return this worker's chunk.

        NOTE(review): three parameters share one name (SyntaxError) — the
        call sites suggest (scatter_list, target_shape, target_type), and
        ``torch.floataa`` does not exist (mangled ``torch.float32``?).
        ``target_tensor`` is read below but the binding was mangled to ``a``.
        """
        a = torch.empty(__lowerCAmelCase , dtype=__lowerCAmelCase )
        dist.scatter(__lowerCAmelCase , src=0 , scatter_list=__lowerCAmelCase , group=self.process_group )
        return target_tensor

    def A ( self : Dict ) -> List[str]:
        """Guess a network interface name for gloo (first interface starting with "e")."""
        a = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        a = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCAmelCase )
        # NOTE(review): ``addrs``/``ifname`` are read but never bound (targets
        # mangled to ``a``) and ``__lowerCAmelCase`` is undefined here
        # (presumably the ``None`` default for ``next``).
        return ifname

    def A ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]:
        """Retrieve docs for each query, gathering to rank 0 and scattering results.

        NOTE(review): two parameters share one name (SyntaxError); the body
        reads ``question_hidden_states``/``n_docs``/``world_size`` etc. that
        the mangled ``a`` bindings never create.  Also the function returns a
        3-tuple while the annotation declares a pair.
        """
        if not dist.is_initialized():
            a , a = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
        # distributed training
        a = dist.get_world_size(group=self.process_group )
        # gather logic
        a = None
        if self._is_main():
            a = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCAmelCase )]
        dist.gather(torch.tensor(__lowerCAmelCase ) , dst=0 , gather_list=__lowerCAmelCase , group=self.process_group )
        # scatter logic
        a = question_hidden_states.shape[0]
        a = []
        a = []
        if self._is_main():
            assert len(__lowerCAmelCase ) == world_size
            a , a = self._main_retrieve(torch.cat(__lowerCAmelCase ).numpy() , __lowerCAmelCase )
            a , a = torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase )
            a = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
            a = self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
        a = self._scattered(__lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
        a = self._scattered(__lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCAmelCase )
| 32
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
    """Builds tiny ConvNeXT-V2 configs/inputs and runs shape checks for the
    base model, the image-classification head and the backbone.

    NOTE(review): this file looks machine-mangled — ``__init__`` and the
    ``create_and_check_*`` methods declare several parameters under the one
    name ``__lowerCAmelCase`` (duplicate argument names are a SyntaxError),
    and assignments such as ``a = parent`` discard their value instead of
    binding the ``self.parent`` etc. that later methods read.  Docstrings
    below describe the apparent intent; confirm against the original source.
    """

    def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]:
        """Record the test hyper-parameters (batch/image size, depths, stages, ...).

        NOTE(review): each ``a = <name>`` below presumably was
        ``self.<name> = <name>`` originally.
        """
        a = parent
        a = batch_size
        a = image_size
        a = num_channels
        a = num_stages
        a = hidden_sizes
        a = depths
        a = is_training
        a = use_labels
        a = intermediate_size
        a = hidden_act
        a = num_labels
        a = initializer_range
        a = out_features
        a = out_indices
        a = scope

    def A ( self : Optional[Any] ) -> int:
        """Build (config, pixel_values, labels); labels only when ``use_labels``."""
        a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.num_labels )
        a = self.get_config()
        return config, pixel_values, labels

    def A ( self : Union[str, Any] ) -> Optional[int]:
        """Build a small ``ConvNextVaConfig`` from the recorded hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]:
        """Base model: last hidden state must be (B, C_last, H // 32, W // 32)."""
        a = ConvNextVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict:
        """Classification head: logits must be (batch_size, num_labels)."""
        a = ConvNextVaForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int:
        """Backbone: feature maps/channels match ``out_features``; with
        ``out_features=None`` only the last stage is returned."""
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        a = None
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def A ( self : Union[str, Any] ) -> Dict:
        """Return (config, inputs_dict) without labels for the common tests."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values}
        return config, inputs_dict

    def A ( self : Dict ) -> Optional[int]:
        """Return (config, inputs_dict) including labels."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """ConvNeXT-V2 model test suite (base model, classification head, backbone,
    training, gradient checkpointing, hidden-state shapes, pretrained loading).

    NOTE(review): machine-mangled — every test method is named ``A`` (later
    definitions shadow earlier ones, so unittest would only collect the last),
    many ``__lowerCAmelCase`` references are undefined in their method scope
    (e.g. the ``get_values(...)`` mapping arguments, the target device), and
    most assignments bind a throwaway ``a`` instead of the names later lines
    read (``model``, ``loss``, ``inputs_dict`` ...).  Docstrings describe the
    apparent intent; confirm against the original test file.
    """

    # Model classes exercised by the shared mixins (empty without torch).
    _UpperCAmelCase = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-test mapping: task name -> model class.
    _UpperCAmelCase = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Capability flags consumed by the shared test mixins (attention outputs,
    # head masking, pruning, etc. are all disabled for this model family).
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : List[str] ) -> List[Any]:
        """Instantiate the model tester and a ConfigTester for the config class."""
        a = ConvNextVaModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )

    def A ( self : Tuple ) -> Dict:
        """Run the standard battery of config serialization/initialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A ( self : List[Any] ) -> List[Any]:
        """No extra common config properties to test for ConvNeXT-V2."""
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
    def A ( self : List[Any] ) -> List[Any]:
        """Skipped: the model consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
    def A ( self : int ) -> List[Any]:
        """Skipped: no token embedding tables to get/set."""
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
    def A ( self : Optional[int] ) -> Dict:
        """Skipped: feed-forward chunking is not applicable."""
        pass

    def A ( self : List[str] ) -> List[str]:
        """Train-mode forward/backward for each model class (skips mapped classes)."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = True
            # NOTE(review): the two mapping arguments are undefined here —
            # presumably MODEL_MAPPING_NAMES and MODEL_FOR_BACKBONE_MAPPING_NAMES.
            if model_class.__name__ in [
                *get_values(__lowerCAmelCase ),
                *get_values(__lowerCAmelCase ),
            ]:
                continue
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()

    def A ( self : Optional[int] ) -> List[str]:
        """Same as the training test but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = False
            a = True
            if (
                model_class.__name__
                in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()

    def A ( self : List[Any] ) -> Any:
        """The forward signature's first argument must be ``pixel_values``."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]
            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def A ( self : Dict ) -> Dict:
        """Shape-check the base model via the model tester."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def A ( self : Tuple ) -> List[Any]:
        """Hidden states: one per stage (+ embedding), spatial size image // 4."""

        def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            a = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Shape-check the image-classification head via the model tester."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[str]:
        """Load the first published checkpoint and check instantiation succeeds."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
    """Load the standard COCO fixture image used by the integration tests."""
    # The original bound the opened image to a throwaway ``a`` and then
    # returned the undefined name ``image`` (NameError); bind it properly.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run ConvNeXT-V2 tiny on the COCO fixture image
    and verify the classification logits.

    NOTE(review): machine-mangled — both methods are named ``A`` (the second
    shadows the first), yet the test reads ``self.default_image_processor``
    (presumably the cached property's original name).  The undefined
    ``__lowerCAmelCase`` references were presumably ``torch_device``, and
    ``preprocessor``/``model``/``outputs`` are read but never bound (targets
    mangled to ``a``).  Confirm against the original test file.
    """

    @cached_property
    def A ( self : Optional[int] ) -> str:
        """Image processor for the checkpoint (None when vision deps are missing)."""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None

    @slow
    def A ( self : List[str] ) -> Union[str, Any]:
        """Forward the fixture image through the pretrained model and check logits."""
        a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # verify the logits
        a = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """Deprecated alias for ``GLPNImageProcessor``.

    Kept for backward compatibility: emits a ``FutureWarning`` on
    instantiation, then behaves exactly like the image processor.
    """

    def __init__( self : int , *args : Union[str, Any] , **kwargs : Dict ) -> None:
        """Warn about the deprecation, then delegate to the image processor.

        The original declared ``*__lowerCAmelCase`` and ``**__lowerCAmelCase``
        (duplicate argument names — a SyntaxError) and passed the positional
        args where the warning category belongs.
        """
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
    """Constant-stretch (histogram-equalization style) contrast enhancement
    using OpenCV and matplotlib.

    NOTE(review): machine-mangled — ``__init__`` binds every value to a
    throwaway ``a`` instead of the attributes read later (``self.img``,
    ``self.original_image``, ``self.k``, ``self.L``, ``self.sk``,
    ``self.rem``, ``self.last_list``, ``self.number_of_rows``/``cols``),
    and ``stretch`` reads the undefined names ``x``/``prk``/``last``/``num``.
    Comments below describe the apparent intent; confirm against the source.
    """

    def __init__( self : List[str] ) -> List[str]:
        """Initialize image buffers, histogram accumulators and counters.

        NOTE(review): the nine bindings below presumably were the input /
        original image, the equalized-level list, pixel count ``k``, gray
        levels ``L`` (256), cumulative ``sk``, remainder ``rem`` and the
        row/column counters.
        """
        a = ""
        a = ""
        a = []
        a = 0
        a = 256
        a = 0
        a = 0
        a = 0
        a = 0

    def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int:
        """Equalize the grayscale image at the given path and write output.jpg."""
        a = cva.imread(__lowerCAmelCase , 0 )
        a = copy.deepcopy(self.img )
        # Histogram of the input; ``x`` holds per-bin counts.
        a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        a = np.sum(__lowerCAmelCase )
        for i in range(len(__lowerCAmelCase ) ):
            # NOTE(review): ``prk`` (probability of level i) is read next but
            # never bound — the target was mangled to ``a``.
            a = x[i] / self.k
            self.sk += prk
            a = (self.L - 1) * self.sk
            if self.rem != 0:
                a = int(last % last )
            a = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(__lowerCAmelCase )
            a = int(np.ma.count(self.img ) / self.img[1].size )
            a = self.img[1].size
        # Remap every pixel through the computed level table.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                a = self.img[j][i]
                if num != self.last_list[num]:
                    a = self.last_list[num]
        cva.imwrite("output_data/output.jpg" , self.img )

    def A ( self : Any ) -> int:
        """Plot the histogram of the current image."""
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def A ( self : Any ) -> int:
        """Show input and output images for 5 seconds, then close the windows."""
        cva.imshow("Output-Image" , self.img )
        cva.imshow("Input-Image" , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): this driver cannot run as written — ``ConstantStretch``,
    # ``file_path`` and ``stretcher`` are all undefined (the class above is
    # named ``_lowercase`` and the path/instance are bound to ``A_``).  Also
    # ``os.path.basename(__file__)`` looks like it should be
    # ``os.path.dirname(__file__)`` to locate ``image_data/input.jpg`` next
    # to the script — confirm against the original.
    A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    A_ : int = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 32
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for AltCLIP: submodule name -> public symbols.
A_ : Tuple = {
    '''configuration_altclip''': [
        '''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''AltCLIPConfig''',
        '''AltCLIPTextConfig''',
        '''AltCLIPVisionConfig''',
    ],
    '''processing_altclip''': ['''AltCLIPProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.  (The original
    # rebound the whole dict to this list — destroying the configuration and
    # processing entries — and later referenced an undefined
    # ``_import_structure``.)
    A_['''modeling_altclip'''] = [
        '''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AltCLIPPreTrainedModel''',
        '''AltCLIPModel''',
        '''AltCLIPTextModel''',
        '''AltCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Install the lazy proxy in place of this module so the heavy imports run
    # on first attribute access.  (The original bound the proxy to a plain
    # variable, which has no effect.)
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A_, module_spec=__spec__)
| 32
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( UpperCAmelCase__ ):
    """Unconditional image-generation pipeline for the score-based SDE-VE sampler.

    Modules (set via ``register_modules``):
        unet: the UNet noise/score network.
        scheduler: a ``ScoreSdeVeScheduler`` driving predictor/corrector steps.
    """

    # NOTE(review): these placeholder class attributes appear to be mangled
    # type annotations (``unet: UNetaDModel`` / ``scheduler: ScoreSdeVeScheduler``).
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__( self : Optional[Any] , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ) -> str:
        """Register the UNet and scheduler on the pipeline.

        The original declared both parameters under the one name
        ``__lowerCAmelCase`` (duplicate argument names — a SyntaxError).
        """
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : int , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images by integrating the reverse SDE.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of predictor steps.
            generator: optional RNG(s) for reproducible noise.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: return an ``ImagePipelineOutput`` instead of a tuple.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from scaled Gaussian noise.
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step (Langevin corrector)
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample

            # prediction step (reverse-diffusion predictor); the original read
            # an undefined ``model`` because the binding was mangled to ``a``.
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the expected (mean) sample for the final image.
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 32
| 1
|
# Base64 alphabet; the encoder/decoder below index into it.  (The original
# bound this to ``A_`` while the functions reference ``B64_CHARSET``.)
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''


def UpperCAmelCase__ ( data: bytes ) -> bytes:
    """Encode *data* to base64, returning the encoded bytes (with '=' padding).

    Raises:
        TypeError: if *data* is not a bytes-like object.
    """
    # The original checked ``isinstance(data, data)`` (always a TypeError from
    # isinstance itself), raised the data instead of the message, and called
    # ``bin`` on the whole buffer instead of each byte.
    if not isinstance(data , bytes ):
        msg = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
        raise TypeError(msg )
    # Concatenate each byte as 8 binary digits.
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def UpperCAmelCase__ ( encoded_data: str ) -> bytes:
    """Decode base64-encoded *encoded_data* (str or ASCII bytes) to raw bytes.

    Raises:
        TypeError: if the input is neither bytes-like nor a string.
        ValueError: if a bytes input contains non-ASCII characters.
        AssertionError: on invalid base64 characters or incorrect padding.
    """
    # The original compared values against themselves in ``isinstance`` and
    # passed the whole string (instead of each character) to
    # ``B64_CHARSET.index``; both are restored below.
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            F"""not '{encoded_data.__class__.__name__}'"""
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    # Regroup the bit stream into bytes.
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
# Doomsday "anchor day" per month in leap / non-leap years, and the mapping
# from weekday index (0 = Sunday) to its name.  (The original bound all three
# to the single name ``A_``, so the function's references were undefined.)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def UpperCAmelCase__ ( year: int , month: int , day: int ) -> str:
    """Return the weekday name for a Gregorian date using the Doomsday algorithm.

    >>> UpperCAmelCase__(2020, 10, 24)
    'Saturday'
    """
    # The original signature declared three parameters all named
    # ``UpperCAmelCase__`` (a SyntaxError) and bound every intermediate to a
    # throwaway ``a``; the names below are the ones the expressions reference.
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint``: input validation and stepwise
    token updates (progress, completion, reset, remaining count).

    NOTE(review): machine-mangled — all four test methods are named ``A``
    (only the last survives on the class), several ``__lowerCAmelCase``
    references are undefined in their methods (presumably ``list`` /
    ``ValueError`` originally), and ``a , a , a = dc.update(...)`` discards
    the (stepped, completed, reset) triple that following assertions read.
    Confirm against the original test file.
    """

    def A ( self : Union[str, Any] ) -> int:
        """Constructor accepts a list of token-id lists and rejects tensors."""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def A ( self : Tuple ) -> Dict:
        """One nested sequence being a prefix of another must be rejected."""
        a = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase )  # fails here

    def A ( self : int ) -> Any:
        """Stepwise update over [1, 2, 3] completes the two-branch constraint."""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def A ( self : List[Any] ) -> List[Any]:
        """Completion, reset and ``remaining`` across a three-branch constraint."""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 32
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
    """Arguments for selecting the pretrained model/config/tokenizer to fine-tune.

    NOTE(review): every field below is declared under the single name
    ``_UpperCAmelCase``, so later declarations shadow earlier ones and only
    the last survives — the help strings suggest the originals were
    ``model_name_or_path``, ``config_name``, ``task_type``,
    ``tokenizer_name``, ``use_fast`` and ``cache_dir``.  Confirm against the
    original example script.
    """

    # Path or hub id of the pretrained model (required).
    _UpperCAmelCase = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # Config name/path when it differs from the model name.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    # Token-classification task variant (NER, POS, ...).
    _UpperCAmelCase = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    # Tokenizer name/path when it differs from the model name.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    # Whether to use the fast (Rust) tokenizer implementation.
    _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    # Cache directory for downloaded pretrained artifacts.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
    """Arguments describing the input data for the token-classification task.

    NOTE(review): same mangling as the model-arguments class — all fields
    share the name ``_UpperCAmelCase`` (later declarations shadow earlier
    ones); the originals were presumably ``data_dir``, ``labels``,
    ``max_seq_length`` and ``overwrite_cache``.  Confirm against the original
    example script.
    """

    # Directory containing the CoNLL-2003-formatted .txt files (required).
    _UpperCAmelCase = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    # Optional file listing all labels (defaults to CoNLL-2003 labels).
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    # Maximum tokenized sequence length (longer truncated, shorter padded).
    _UpperCAmelCase = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    # Whether to ignore and overwrite the cached datasets.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
a = import_module("tasks" )
try:
a = getattr(UpperCAmelCase__ , model_args.task_type )
a = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a = token_classification_task.get_labels(data_args.labels )
a = dict(enumerate(UpperCAmelCase__ ) )
a = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
a = np.argmax(UpperCAmelCase__ , axis=2 )
a , a = preds.shape
a = [[] for _ in range(UpperCAmelCase__ )]
a = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
a , a = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a = trainer.evaluate()
a = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
a = TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a , a , a = trainer.predict(UpperCAmelCase__ )
a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
a = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
a = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    """Multiprocessing entry point: ignores its argument and delegates to ``main``.

    NOTE(review): assumes a module-level ``main`` exists — the training
    routine defined above appears to have been renamed, so this may raise
    NameError.  Verify before use.
    """
    main()
# Script entry point.
# NOTE(review): ``main`` may be undefined here — the preceding training
# function appears to have been renamed; confirm before running as a script.
if __name__ == "__main__":
    main()
| 32
| 1
|
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Public structure of this sub-package: it only exposes ``TapexTokenizer``.
A_ : Optional[Any] = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    # Real import only for static type checkers.
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is imported on
    # first attribute access.
    # NOTE(review): ``_import_structure`` is undefined — the structure dict
    # above was bound to ``A_`` instead, so this line raises NameError.
    A_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# NOTE(review): both the logger and the map below are bound to the same
# name ``A_`` — the second assignment clobbers the logger.
A_ : str = logging.get_logger(__name__)

# Map of canonical RWKV checkpoint names to their hosted config files.
A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
    """Configuration for an RWKV model (mirrors ``transformers.RwkvConfig``).

    NOTE(review): restored from a corrupted block whose ``__init__`` declared
    every parameter under the single name ``__lowerCAmelCase`` (a duplicate-
    argument SyntaxError) while the body read the real names, and which bound
    attribute values to a throwaway local ``a``.  Parameter names and defaults
    were recovered from those body references.  The base class
    ``UpperCAmelCase__`` is presumably meant to be ``PretrainedConfig`` —
    TODO confirm.  The two class attributes below were both named
    ``_UpperCAmelCase`` (the second clobbered the first); their intended
    names are restored.
    """

    # Identifier used by the auto classes to select this config.
    model_type = '''rwkv'''
    # Alias: the generic ``max_position_embeddings`` maps onto ``context_length``.
    attribute_map = {'''max_position_embeddings''': '''context_length'''}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Store the model hyper-parameters and forward the shared token /
        embedding options to the base config class."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes default to values computed from ``hidden_size``.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 32
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _lowercase :
def __init__( self : Union[str, Any] , __lowerCAmelCase : Collection[float] | None = None ) -> None:
"""simple docstring"""
if components is None:
a = []
a = list(__lowerCAmelCase )
def __len__( self : str ) -> int:
"""simple docstring"""
return len(self.__components )
def __str__( self : str ) -> str:
"""simple docstring"""
return "(" + ",".join(map(__lowerCAmelCase , self.__components ) ) + ")"
def __add__( self : Any , __lowerCAmelCase : Vector ) -> Vector:
"""simple docstring"""
a = len(self )
if size == len(__lowerCAmelCase ):
a = [self.__components[i] + other.component(__lowerCAmelCase ) for i in range(__lowerCAmelCase )]
return Vector(__lowerCAmelCase )
else:
raise Exception("must have the same size" )
def __sub__( self : Dict , __lowerCAmelCase : Vector ) -> Vector:
"""simple docstring"""
a = len(self )
if size == len(__lowerCAmelCase ):
a = [self.__components[i] - other.component(__lowerCAmelCase ) for i in range(__lowerCAmelCase )]
return Vector(__lowerCAmelCase )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : List[str] , __lowerCAmelCase : float ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Union[str, Any] , __lowerCAmelCase : Vector ) -> float:
"""simple docstring"""
...
def __mul__( self : Dict , __lowerCAmelCase : float | Vector ) -> float | Vector:
"""simple docstring"""
if isinstance(__lowerCAmelCase , (float, int) ):
a = [c * other for c in self.__components]
return Vector(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(self ) == len(__lowerCAmelCase ):
a = len(self )
a = [self.__components[i] * other.component(__lowerCAmelCase ) for i in range(__lowerCAmelCase )]
return sum(__lowerCAmelCase )
else: # error case
raise Exception("invalid operand!" )
def A ( self : Optional[int] ) -> Vector:
"""simple docstring"""
return Vector(self.__components )
def A ( self : Any , __lowerCAmelCase : int ) -> float:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def A ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : float ) -> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
a = value
def A ( self : Any ) -> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
a = [c**2 for c in self.__components]
return math.sqrt(sum(__lowerCAmelCase ) )
def A ( self : List[Any] , __lowerCAmelCase : Vector , __lowerCAmelCase : bool = False ) -> float:
"""simple docstring"""
a = self * other
a = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Intended to return a zero vector of the given dimension.

    NOTE(review): broken as written — ``isinstance(x, x)`` is not a valid
    type check, and both ``Vector`` and ``dimension`` are undefined here
    (the vector class above is named ``_lowercase`` and the parameter was
    renamed to ``UpperCAmelCase__``).  Calling this raises at runtime.
    """
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    return Vector([0] * dimension )
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    """Intended to return the unit basis vector e_pos of the given dimension.

    NOTE(review): broken as written — both parameters share one name (a
    duplicate-argument SyntaxError), the zero list and the ``1`` entry are
    bound to the same local ``a`` (the list is discarded), and
    ``Vector``/``dimension`` are undefined.  Needs restoration before use.
    """
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (isinstance(UpperCAmelCase__ , UpperCAmelCase__ ))
    a = [0] * dimension
    a = 1
    return Vector(UpperCAmelCase__ )
def UpperCAmelCase__ ( scalar :float , x :"Vector" , y :"Vector" ):
    """Compute ``x * scalar + y`` (the BLAS "axpy" operation).

    scalar: numeric multiplier applied to ``x``.
    x, y:   vector-like operands supporting ``*`` (by a scalar) and ``+``.

    NOTE(review): parameter names restored from the body of the corrupted
    original, which declared three parameters under one name (a
    duplicate-argument SyntaxError).  The original also asserted
    ``isinstance(x, Vector)``; ``Vector`` is not defined in this module,
    so only the scalar type check is kept.
    """
    assert isinstance(scalar , (int, float) )
    return x * scalar + y
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    """Intended to return a seeded random integer vector.

    NOTE(review): broken as written — three parameters share one name (a
    duplicate-argument SyntaxError) and ``Vector`` is undefined in this
    module (the vector class above is named ``_lowercase``).
    """
    random.seed(UpperCAmelCase__ )
    a = [random.randint(UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )]
    return Vector(UpperCAmelCase__ )
class _lowercase :
def __init__( self : Optional[Any] , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
"""simple docstring"""
a = matrix
a = w
a = h
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
a = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , __lowerCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
a = []
for i in range(self.__height ):
a = [
self.__matrix[i][j] + other.component(__lowerCAmelCase , __lowerCAmelCase )
for j in range(self.__width )
]
matrix.append(__lowerCAmelCase )
return Matrix(__lowerCAmelCase , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : List[str] , __lowerCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
a = []
for i in range(self.__height ):
a = [
self.__matrix[i][j] - other.component(__lowerCAmelCase , __lowerCAmelCase )
for j in range(self.__width )
]
matrix.append(__lowerCAmelCase )
return Matrix(__lowerCAmelCase , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : List[Any] , __lowerCAmelCase : float ) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : Dict , __lowerCAmelCase : Vector ) -> Vector:
"""simple docstring"""
...
def __mul__( self : List[Any] , __lowerCAmelCase : float | Vector ) -> Vector | Matrix:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # matrix-vector
if len(__lowerCAmelCase ) == self.__width:
a = zero_vector(self.__height )
for i in range(self.__height ):
a = [
self.__matrix[i][j] * other.component(__lowerCAmelCase )
for j in range(self.__width )
]
ans.change_component(__lowerCAmelCase , sum(__lowerCAmelCase ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(__lowerCAmelCase , (int, float) ): # matrix-scalar
a = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__lowerCAmelCase , self.__width , self.__height )
return None
def A ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.__height
def A ( self : int ) -> int:
"""simple docstring"""
return self.__width
def A ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def A ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float ) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
a = value
else:
raise Exception("change_component: indices out of bounds" )
def A ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
a = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__lowerCAmelCase ) ):
a = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__lowerCAmelCase , self.__width - 1 , self.__height - 1 ).determinant()
def A ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__lowerCAmelCase , __lowerCAmelCase )
else:
raise Exception("Indices out of bounds" )
def A ( self : Any ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
a = [
self.__matrix[0][y] * self.cofactor(0 , __lowerCAmelCase ) for y in range(self.__width )
]
return sum(__lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Intended to return the n x n zero matrix.

    NOTE(review): broken as written — ``n`` and ``Matrix`` are undefined
    (the matrix class above is named ``_lowercase``), and the built rows are
    discarded: the call passes the integer parameter three times instead of
    the row list.  Calling this raises at runtime.
    """
    a = [[0] * n for _ in range(UpperCAmelCase__ )]
    return Matrix(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    """Intended to return a seeded random integer matrix.

    NOTE(review): broken as written — four parameters share one name (a
    duplicate-argument SyntaxError) and ``Matrix`` is undefined in this
    module (the matrix class above is named ``_lowercase``).
    """
    random.seed(UpperCAmelCase__ )
    a = [
        [random.randint(UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )
    ]
    return Matrix(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """Audio feature extractor producing padded log-mel spectrograms plus a
    patch-level attention mask (keys ``audio_values`` / ``audio_mask``).

    NOTE(review): this block is corrupted — ``__init__`` and ``__call__``
    declare several parameters under the single name ``__lowerCAmelCase``
    (a duplicate-argument SyntaxError) while their bodies read the intended
    names (``spectrogram_length``, ``sampling_rate``, ...), and attribute
    values are bound to a throwaway local ``a`` instead of ``self``.
    The comments below describe apparent intent only; the code itself needs
    restoration from upstream before it can run.
    """

    # Names of the model inputs produced by __call__.
    _UpperCAmelCase = ['''audio_values''', '''audio_mask''']

    def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
        """Apparent parameters, in order: spectrogram_length, num_channels,
        patch_size, feature_size, sampling_rate, hop_length_to_sampling_rate,
        n_fft, padding_value — TODO confirm against upstream."""
        super().__init__(
            feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
        a = spectrogram_length
        a = num_channels
        a = patch_size
        # Number of frequency patches per frame.
        a = feature_size // self.patch_size[1]
        a = n_fft
        a = sampling_rate // hop_length_to_sampling_rate
        a = sampling_rate
        a = padding_value
        # Slaney-style mel filter bank used by the fbank extraction below.
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T

    def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """Convert one waveform into a clipped, normalized log-mel spectrogram.

        NOTE(review): intermediate results are all bound to ``a`` while the
        following lines read ``log_spec`` — undefined name at runtime.
        """
        a = spectrogram(
            __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
        a = log_spec[:, :-1]
        a = log_spec - 2_0.0
        # Clip to [-2, 0] then shift into [-1, 1].
        a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
        """Featurize raw speech into padded spectrogram patches plus mask.

        NOTE(review): the body reads names (``sampling_rate``,
        ``raw_speech``, ``return_attention_mask``, ...) that the corrupted
        signature no longer declares.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        a = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        a = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            a = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            a = np.array(__lowerCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        a = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        a = padded_audio_features * self.padding_value
        for i in range(len(__lowerCAmelCase ) ):
            a = audio_features[i]
            a = feature
        # return as BatchFeature
        if return_attention_mask:
            a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            a = {"audio_values": padded_audio_features}
        a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
        return encoded_inputs
| 32
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _lowercase ( unittest.TestCase ):
    """Round-trip tests for ``BlipProcessor`` (image processor + BERT tokenizer).

    NOTE(review): every method is named ``A`` — later definitions clobber
    earlier ones and unittest discovers no ``test_*`` methods, so nothing in
    this class actually runs.  Fixtures are also bound to a throwaway local
    ``a`` while subsequent lines read the intended names (``processor``,
    ``tokenizer``, ...), which would raise NameError.  Original method names
    need to be restored from upstream.
    """

    def A ( self : str ) -> Optional[Any]:
        """Apparent setUp: create a temp dir and save a BlipProcessor fixture."""
        a = tempfile.mkdtemp()
        a = BlipImageProcessor()
        a = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        a = BlipProcessor(__lowerCAmelCase , __lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def A ( self : Tuple , **__lowerCAmelCase : Optional[Any] ) -> Dict:
        """Reload the saved processor (kwargs forwarded) and return its tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer

    def A ( self : List[str] , **__lowerCAmelCase : str ) -> int:
        """Reload the saved processor (kwargs forwarded) and return its image processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor

    def A ( self : Optional[int] ) -> Tuple:
        """Apparent tearDown: remove the temp fixture dir."""
        shutil.rmtree(self.tmpdirname )

    def A ( self : Tuple ) -> str:
        """Return a list with one random PIL image for use as pixel input."""
        a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        a = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def A ( self : int ) -> Any:
        """Round-trip save_pretrained/from_pretrained with overridden kwargs."""
        a = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        a = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
        a = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __lowerCAmelCase )

    def A ( self : List[Any] ) -> Union[str, Any]:
        """Processor image features should match the bare image processor's."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        a = self.prepare_image_inputs()
        a = image_processor(__lowerCAmelCase , return_tensors="np" )
        a = processor(images=__lowerCAmelCase , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def A ( self : str ) -> Optional[Any]:
        """Processor text encoding should match the bare tokenizer's."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        a = "lower newer"
        a = processor(text=__lowerCAmelCase )
        a = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def A ( self : Optional[Any] ) -> List[str]:
        """Text + image call should yield the expected keys; no input should raise."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        a = "lower newer"
        a = self.prepare_image_inputs()
        a = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(__lowerCAmelCase ):
            processor()

    def A ( self : Optional[int] ) -> Tuple:
        """batch_decode should delegate to the tokenizer."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        a = processor.batch_decode(__lowerCAmelCase )
        a = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : str ) -> Optional[Any]:
        """Model input names should be the processor's supported keys."""
        a = self.get_image_processor()
        a = self.get_tokenizer()
        a = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
        a = "lower newer"
        a = self.prepare_image_inputs()
        a = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 32
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Test helper that builds tiny MaskFormer configs/inputs and checks model outputs.

    NOTE(review): corrupted — every non-dunder method is named ``A`` (later
    definitions clobber earlier ones), ``__init__`` binds its arguments to a
    throwaway local ``a`` instead of ``self``, and tuple unpackings of the
    form ``a , a , ... = ...`` discard all values while following lines read
    the intended names (``config``, ``pixel_values``, ...).  Original method
    names need restoration from upstream before this tester can work.
    """

    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """Apparent parameters: parent, batch_size, is_training,
        use_auxiliary_loss, num_queries, num_channels, min_size, max_size,
        num_labels, mask_feature_size — TODO confirm; the duplicated
        ``__lowerCAmelCase`` names make this a SyntaxError as written."""
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size

    def A ( self : Union[str, Any] ) -> Dict:
        """Build random pixel values, mask and labels plus a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )
        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A ( self : str ) -> Any:
        """Build a tiny MaskFormer config (Swin backbone + DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def A ( self : Union[str, Any] ) -> Any:
        """Return (config, inputs_dict) for the common model tests."""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """Check hidden-state counts against the config's layer depths."""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """Run the base MaskFormerModel and verify output shapes/presence."""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """Run the instance-segmentation head with and without labels and
        verify logits shapes and the loss."""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()

        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )
        comm_check_on_output(__lowerCAmelCase )
        a = model(
            pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        comm_check_on_output(__lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    # Model classes / pipeline mapping exercised by the common tests (empty without torch).
    # NOTE(review): the source transformation collapsed attribute names to `_UpperCAmelCase`
    # and locals to `a`; names read later (e.g. `self.model_tester`, `arg_names`) no longer
    # resolve — restore the original bindings before relying on these tests.
    _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Capability flags consumed by the common test mixin; all disabled here.
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : List[str] ) -> List[Any]:
        """Create the shared model tester and config tester used by the tests below."""
        a = MaskFormerModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def A ( self : Any ) -> List[str]:
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()

    def A ( self : Optional[Any] ) -> Optional[int]:
        """Forward-pass check for the base model with hidden states requested."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : int ) -> int:
        """Forward-pass check for the instance-segmentation head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def A ( self : List[Any] ) -> Optional[Any]:
        """Skipped common test (no inputs_embeds path)."""
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def A ( self : str ) -> Union[str, Any]:
        """Skipped common test (no input-embedding accessor)."""
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped common test (not generative)."""
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped common test (no token embeddings)."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def A ( self : Optional[int] ) -> List[str]:
        """Skipped multi-GPU common test (add_module vs. nn.DataParallel)."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A ( self : List[str] ) -> Any:
        """Temporarily skipped common test (model too large)."""
        pass

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Every model's forward signature must start with `pixel_values`."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]
            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[Any]:
        """Smoke-test loading a pretrained checkpoint from the Hub."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    def A ( self : str ) -> Dict:
        """A loss must be produced for minimal random inputs at the model's min size."""
        a = (self.model_tester.min_size,) * 2
        a = {
            "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
        }
        a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """Forward-pass check with output_hidden_states enabled."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : List[str] ) -> Any:
        """Attentions must be returned when output_attentions is requested."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
            a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Training must produce a loss that can be back-propagated."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
        loss.backward()

    def A ( self : List[str] ) -> Union[str, Any]:
        """Gradients must flow to every retained intermediate output during training."""
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = True
        a = True
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        a = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance for the tensor comparisons in the integration tests below.
A_ : int = 1E-4
def UpperCAmelCase__ ( ):
    """Load and return the COCO cats fixture image used by the integration tests.

    Fix: the opened image was bound to ``a`` but the function returned the
    undefined name ``image`` (NameError); return the bound value instead.
    """
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return a
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests running real MaskFormer checkpoints on a fixture image.

    NOTE(review): locals were collapsed to ``a`` by the source transformation, so
    names read later (``model``, ``inputs``, ``outputs``, ...) no longer resolve —
    restore the original bindings before relying on these tests.
    """

    @cached_property
    def A ( self : int ) -> Optional[int]:
        """Image processor for the swin-small COCO checkpoint (None without vision deps)."""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def A ( self : List[Any] ) -> Optional[Any]:
        """Compare base-model hidden-state slices against pinned reference values."""
        a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        a = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        a = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : str ) -> Union[str, Any]:
        """Compare instance-segmentation head logits (swin-small COCO) against references."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : List[Any] ) -> Any:
        """Compare instance-segmentation head logits (resnet101 COCO-stuff) against references."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : int ) -> Any:
        """The loss must be computed for batched inputs with segmentation maps."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 32
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture model shared by the tokenizer tests below.
A_ : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# Reserved language-code token ids in the MBart-50 vocabulary (en_XX and ro_RO).
A_ : List[str] = 25_00_04
A_ : str = 25_00_20
@require_sentencepiece
@require_tokenizers
class _lowercase ( UpperCAmelCase__, unittest.TestCase ):
    """Common tokenizer-mixin tests for MBart-50, driven by a small SentencePiece fixture.

    NOTE(review): locals were collapsed to ``a`` by the source transformation, so
    names referenced afterwards (``tokenizer``, ``vocab_keys``, ``ids``, ...) no
    longer resolve — restore the original bindings before relying on these tests.
    """

    _UpperCAmelCase = MBartaaTokenizer
    _UpperCAmelCase = MBartaaTokenizerFast
    _UpperCAmelCase = True
    _UpperCAmelCase = True

    def A ( self : str ) -> Tuple:
        """Write the fixture tokenizer into the common-test temp dir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        a = MBartaaTokenizer(__lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def A ( self : int ) -> Dict:
        """`<s>` must round-trip to id 0 and back."""
        a = "<s>"
        a = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )

    def A ( self : int ) -> str:
        """Spot-check the fixture vocabulary contents and size."""
        a = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(__lowerCAmelCase ) , 1054 )

    def A ( self : Union[str, Any] ) -> str:
        """Fixture vocabulary size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1054 )

    def A ( self : Tuple ) -> List[Any]:
        """Full tokenization round-trip, including <unk> handling for OOV pieces."""
        a = MBartaaTokenizer(__lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCAmelCase )
        a = tokenizer.tokenize("This is a test" )
        self.assertListEqual(__lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
        a = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
        self.assertListEqual(
            __lowerCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        a = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )

    @slow
    def A ( self : Dict ) -> Dict:
        """Integration check of full-model encodings against pinned expected ids."""
        # fmt: off
        a = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )

    def A ( self : Dict ) -> int:
        """Slow and fast tokenizers must save/load interchangeably (legacy and new formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        a = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                a = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                a = tempfile.mkdtemp()
                a = tokenizer_r.save_pretrained(__lowerCAmelCase )
                a = tokenizer_p.save_pretrained(__lowerCAmelCase )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                a = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
                # Checks everything loads correctly in the same way
                a = tokenizer_r.from_pretrained(__lowerCAmelCase )
                a = tokenizer_p.from_pretrained(__lowerCAmelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__lowerCAmelCase )
                # Save tokenizer rust, legacy_format=True
                a = tempfile.mkdtemp()
                a = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
                a = tokenizer_p.save_pretrained(__lowerCAmelCase )
                # Checks it save with the same files
                self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
                # Checks everything loads correctly in the same way
                a = tokenizer_r.from_pretrained(__lowerCAmelCase )
                a = tokenizer_p.from_pretrained(__lowerCAmelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
                shutil.rmtree(__lowerCAmelCase )
                # Save tokenizer rust, legacy_format=False
                a = tempfile.mkdtemp()
                a = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
                a = tokenizer_p.save_pretrained(__lowerCAmelCase )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                a = tokenizer_r.from_pretrained(__lowerCAmelCase )
                a = tokenizer_p.from_pretrained(__lowerCAmelCase )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
                shutil.rmtree(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
    """End-to-end tests against the real facebook/mbart-large-50-one-to-many-mmt tokenizer.

    NOTE(review): class attributes were collapsed to ``_UpperCAmelCase`` and locals to
    ``a``; names read later (``cls.checkpoint_name``, ``self.tokenizer``, ``batch``, ...)
    no longer resolve — restore the original bindings before relying on these tests.
    """

    _UpperCAmelCase = '''facebook/mbart-large-50-one-to-many-mmt'''
    _UpperCAmelCase = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    _UpperCAmelCase = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    # Expected ids for the first source sentence: en_XX code first, EOS (2) last.
    _UpperCAmelCase = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]

    @classmethod
    def A ( cls : List[str] ) -> Optional[int]:
        """Load the pretrained tokenizer once for the whole test class."""
        a = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        a = 1
        return cls

    def A ( self : Tuple ) -> str:
        """Language-code tokens must map to their reserved fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_0001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_0004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_0020 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_0038 )

    def A ( self : str ) -> str:
        """Encoding the first source sentence reproduces the pinned expected ids."""
        a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Optional[int]:
        """ro_RO is a special id and decoding can strip special tokens consistently."""
        self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
        a = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        a = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
        a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )

    def A ( self : Any ) -> List[str]:
        """Truncation keeps the language code first and EOS (2) last."""
        a = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , __lowerCAmelCase )
        a = 10
        a = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
        self.assertEqual(ids[0] , __lowerCAmelCase )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )

    def A ( self : Optional[int] ) -> Optional[Any]:
        """<mask> and ar_AR map to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_0053, 25_0001] )

    def A ( self : str ) -> int:
        """The fairseq special-token mapping survives save_pretrained/from_pretrained."""
        a = tempfile.mkdtemp()
        a = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__lowerCAmelCase )
        a = MBartaaTokenizer.from_pretrained(__lowerCAmelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )

    @require_torch
    def A ( self : Optional[Any] ) -> Tuple:
        """Batch layout matches fairseq: src starts with EN code, labels with RO code."""
        a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors="pt" )
        a = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def A ( self : str ) -> str:
        """Padded+truncated batch shapes and prefix/suffix token bookkeeping."""
        a = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        a = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        a = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def A ( self : Optional[int] ) -> Dict:
        """Source and target may be truncated to different max lengths."""
        a = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="pt" )
        a = self.tokenizer(
            text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="pt" )
        a = targets["input_ids"]
        a = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def A ( self : Optional[int] ) -> Optional[int]:
        """_build_translation_inputs adds the src code prefix and a forced BOS target code."""
        a = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(__lowerCAmelCase ) , {
                # en_XX, A, test, EOS
                "input_ids": [[25_0004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_0001,
            } , )
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Unit tests for generation's DisjunctiveConstraint.

    NOTE(review): locals were collapsed to ``a`` by the source transformation, so
    names read later (``dc``, ``stepped``, ``completed``, ``reset``) no longer
    resolve — restore the original bindings before relying on these tests.
    """

    def A ( self : Union[str, Any] ) -> int:
        """Constructor accepts a list of lists of ints and rejects tensor inputs."""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def A ( self : Tuple ) -> Dict:
        """A branch that is a strict prefix of another branch must be rejected."""
        a = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase ) # fails here

    def A ( self : int ) -> Any:
        """Stepping 1, 2, 3 walks and completes the [1, 2, 3] branch."""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def A ( self : List[Any] ) -> List[Any]:
        """Completion through the longer branch, and again after reset()."""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 32
| 1
|
import sys
from collections import defaultdict
class _lowercase :
def __init__( self : List[Any] ) -> List[str]:
"""simple docstring"""
a = []
def A ( self : Tuple , __lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
return self.node_position[vertex]
def A ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
a = pos
def A ( self : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
a = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
a = 2 * start + 1
else:
a = 2 * start + 2
if heap[smallest_child] < heap[start]:
a , a = heap[smallest_child], positions[smallest_child]
a , a = (
heap[start],
positions[start],
)
a , a = temp, tempa
a = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __lowerCAmelCase )
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
a = position[index]
while index != 0:
a = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
a = heap[parent]
a = position[parent]
self.set_position(position[parent] , __lowerCAmelCase )
else:
a = val
a = temp
self.set_position(__lowerCAmelCase , __lowerCAmelCase )
break
a = parent
else:
a = val
a = temp
self.set_position(__lowerCAmelCase , 0 )
def A ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ) -> int:
"""simple docstring"""
a = len(__lowerCAmelCase ) // 2 - 1
for i in range(__lowerCAmelCase , -1 , -1 ):
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
def A ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
a = positions[0]
a = sys.maxsize
self.top_to_bottom(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
return temp
def prisms_algorithm( adjacency_list ):
    """Return the edge list of a minimum spanning tree of `adjacency_list`
    using Prim's algorithm with the position-tracking min-heap above.

    `adjacency_list` maps each vertex index to a list of `[neighbor, weight]` pairs.
    Returns `[(tree_vertex, new_vertex), ...]` in the order vertices are absorbed.

    Fix: the obfuscated version bound every local to `a` while the body read the
    real names (`heap`, `visited`, `distance_tv`, ...), and the `__main__` guard
    called the undefined `prisms_algorithm`; coherent naming is restored.
    """
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    # Decrease-key: tighter connection to the partial tree found.
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip() )
    adjacency_list = defaultdict(list )
    for _ in range(edges_number ):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]] )
        adjacency_list[edge[1]].append([edge[0], edge[2]] )
    print(prisms_algorithm(adjacency_list ) )
| 32
|
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    """Return True iff `number` uses each of the digits 1-9 exactly once.

    Fix: the obfuscated version compared `len()`/`set()` of the raw int
    (TypeError) instead of its string form.
    """
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital concatenated product (Project Euler 38).

    Candidates of the form `base * 100002` are `base` concatenated with `2*base`
    (for 4-digit bases), searched in decreasing order; the 3-digit form
    `base * 1002003` (`base`, `2*base`, `3*base`) is a fallback.

    Fix: the obfuscated version called the undefined names `is_9_pandigital`
    and `solution`; a coherent pair of definitions is restored.
    """
    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 32
| 1
|
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Return simple interest: principal * rate * time.

    Fix: the obfuscated version lost the parameter names from the signature
    while the body still read them (NameError); the names are restored.

    Raises:
        ValueError: if `days_between_payments` <= 0, `daily_interest_rate` < 0,
            or `principal` <= 0.
    """
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return compound interest (the interest only, not principal + interest).

    Raises:
        ValueError: if `number_of_compounding_periods` <= 0, the rate < 0,
            or `principal` <= 0.
    """
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return interest for an annual percentage rate compounded daily.

    Fix: restores the call to `compound_interest`, which the obfuscated
    version left as an undefined name.

    Raises:
        ValueError: if `number_of_years` <= 0, the rate < 0, or `principal` <= 0.
    """
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Slow smoke tests for the Tatoeba → PyTorch Marian converter.

    Fixes: the skip decorator checked the undefined name `UpperCAmelCase__`
    (should be the imported DEFAULT_REPO), the cached property passed the
    undefined `__lowerCAmelCase` as save_dir, and the model-card test read
    the unbound name `mmeta`.
    """

    @cached_property
    def resolver( self ):
        """Build a TatoebaConverter writing into a throw-away directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def A ( self : Optional[int] ) -> List[str]:
        """Smoke-test conversion of the heb-eng pair."""
        self.resolver.convert_models(["heb-eng"] )

    @slow
    def A ( self : Dict ) -> Any:
        """Dry-run model-card generation and check the reported language pair."""
        a , mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 32
| 1
|
# Adjacency list and vertex order for the demo graph.  The function body
# references ``edges`` and ``vertices`` by name, so the module constants must
# carry those names (the mangled original bound both to ``A_``).
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def UpperCAmelCase__(start, visited, sort):
    """Return a topological ordering of the module-level graph.

    Depth-first: mark ``start`` visited, recurse into each unvisited
    neighbour, then append ``start`` to ``sort``.  Afterwards any vertex
    still unvisited is processed too, so disconnected components are covered.

    :param start: vertex to begin from
    :param visited: list of already-visited vertices (mutated in place)
    :param sort: accumulated ordering (returned)
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = UpperCAmelCase__(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = UpperCAmelCase__(vertice, visited, sort)
    return sort


if __name__ == "__main__":
    # The mangled original printed the undefined name ``sort``; print the result.
    result = UpperCAmelCase__("a", [], [])
    print(result)
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''lilt'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = position_embedding_type
a = classifier_dropout
a = channel_shrink_ratio
a = max_ad_position_embeddings
| 32
| 1
|
def binomial_coefficient(n: int, r: int) -> int:
    """Return C(n, r) via iterative Pascal's-triangle row updates.

    Builds row ``n`` of Pascal's triangle in place, keeping only the first
    ``r + 1`` entries; O(n*r) time, O(r) space.  The name and keyword
    parameters ``n``/``r`` are grounded by the call below
    (``binomial_coefficient(n=10, r=5)``), which the mangled original left
    undefined.
    """
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # update current row from the previous row, right-to-left so that
        # each c[j-1] read is still the previous row's value
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 32
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a saved PyTorch model.

    :param tf_checkpoint_path: path to the TF checkpoint
    :param config_file: JSON config describing the T5 architecture
    :param pytorch_dump_path: output directory for the PyTorch model

    Name and parameters restored from the mangled original: the
    ``__main__`` block calls ``convert_tf_checkpoint_to_pytorch`` and the
    body references ``config`` / ``pytorch_dump_path``, while the original
    ``def`` declared three duplicate ``UpperCAmelCase__`` parameters.
    """
    # Initialise the PyTorch model from the JSON configuration
    config = TaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
A_ : Tuple = logging.get_logger(__name__)
def rename_key(key):
    """Replace every ``name.<digits>`` segment in *key* with ``name_<digits>``.

    e.g. ``"layers.0.weight" -> "layers_0.weight"``.  Used when mapping
    PyTorch parameter names onto Flax module paths.  The name is grounded
    by the call in the state-dict conversion function below.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename one PyTorch weight key to Flax convention, reshaping if needed.

    :param pt_tuple_key: PyTorch parameter name split into a tuple
    :param pt_tensor: the weight array
    :param random_flax_state_dict: flattened reference Flax params, used to
        disambiguate which Flax name a PyTorch key maps to
    :return: ``(flax_tuple_key, tensor)``

    Name grounded by the caller below; parameter names grounded by the
    identifiers the original body already referenced (its ``def`` line had
    duplicate mangled parameters).
    """
    # layer norm whose PT "bias" actually stores a Flax "scale" (legacy case)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: 4-D weight is moved to Flax kernel layout via transpose(2, 3, 1, 0)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: weight matrix is transposed for Flax
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def UpperCAmelCase__(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    :param pt_state_dict: mapping of PyTorch parameter names to tensors
    :param flax_model: Flax model providing ``init_weights`` for reference shapes
    :param init_key: PRNG seed for the random reference initialisation
    :return: nested (unflattened) dict of ``jnp`` arrays
    :raises ValueError: when a converted tensor's shape disagrees with the
        randomly-initialised Flax parameter of the same name

    The mangled original declared three duplicate parameters; internal names
    are restored from the identifiers the body references.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 32
|
def UpperCAmelCase__(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a ``"0b..."`` string.

    Both operands are rendered in binary, zero-padded to the same width, and
    ANDed character by character.  Parameter names ``a``/``b`` are restored
    from the body's references (the mangled ``def`` had duplicate parameters).

    :raises ValueError: if either input is negative
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
| 1
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from *state_dict*, in place.

    Keys absent from the dict are ignored (``pop`` with a default).  The
    function name is grounded by its call in the checkpoint-conversion
    function below; the mangled original referenced the undefined names
    ``state_dict`` and ``ignore_keys`` while its parameter shadowed the
    function name.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares *emb*'s weight tensor.

    The layer's ``.weight.data`` is replaced outright by the embedding's
    data tensor, so the two modules share storage (standard trick for tying
    an LM head to the token embedding).  Name grounded by the call in the
    conversion function below.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Direct .data assignment: no copy, the linear layer now aliases emb's weights.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and return an HF model.

    :param checkpoint_path: path to the fairseq ``model.pt`` on disk
    :return: ``MaMaaaForConditionalGeneration`` with the converted weights

    Name grounded by the call in the ``__main__`` block; internal names
    restored from the body's original references.  NOTE(review): the
    ``shared.weight`` aliasing and ``lm_head`` assignment targets were lost
    to mangling and are restored from the conventional layout of this
    conversion script — confirm against upstream before shipping.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu")
    # Tie the shared embedding to the decoder's token embedding.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: bookkeeping keys were removed above.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fixed: the mangled original bound the parser to ``A_`` then called the
    # undefined ``parser``, and read ``args.fairseq_pathß`` (stray 'ß'
    # character), which would raise AttributeError at runtime.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 32
|
from collections.abc import Iterable, Iterator
from dataclasses import dataclass

# Demo inputs for the __main__ block below (the mangled original bound both
# tuples to ``A_`` and then referenced the undefined names test_data_odd /
# test_data_even).
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """One element of the sorted singly linked list."""

    data: int
    next_node: "Node | None"


class SortedLinkedList:
    """Singly linked list that keeps its integers in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: "Node | None" = None
        # Insert in descending order at the head, so the list reads ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 32
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Union[str, Any] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''wavlm'''
def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : Dict=768 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[str]=3072 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : Tuple=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : List[Any]=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : str=320 , __lowerCAmelCase : List[str]=800 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=0.0_5 , __lowerCAmelCase : int=10 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : Optional[int]=10 , __lowerCAmelCase : Dict=320 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Union[str, Any]=100 , __lowerCAmelCase : str=256 , __lowerCAmelCase : int=256 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Tuple="mean" , __lowerCAmelCase : Dict=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]=256 , __lowerCAmelCase : Tuple=(512, 512, 512, 512, 1500) , __lowerCAmelCase : List[str]=(5, 3, 3, 1, 1) , __lowerCAmelCase : Tuple=(1, 2, 3, 1, 1) , __lowerCAmelCase : Optional[int]=512 , __lowerCAmelCase : Any=80 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Optional[Any]=None , 
**__lowerCAmelCase : List[str] , ) -> int:
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(__lowerCAmelCase )
a = list(__lowerCAmelCase )
a = list(__lowerCAmelCase )
a = conv_bias
a = num_buckets
a = max_bucket_distance
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layerdrop
a = layer_norm_eps
a = initializer_range
a = num_ctc_classes
a = vocab_size
a = do_stable_layer_norm
a = use_weighted_layer_sum
a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
# parameters for pretraining with codevector quantized representations
a = num_codevectors_per_group
a = num_codevector_groups
a = contrastive_logits_temperature
a = num_negatives
a = codevector_dim
a = proj_codevector_dim
a = diversity_loss_weight
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# adapter
a = add_adapter
a = adapter_kernel_size
a = adapter_stride
a = num_adapter_layers
a = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a = list(__lowerCAmelCase )
a = list(__lowerCAmelCase )
a = list(__lowerCAmelCase )
a = xvector_output_dim
@property
def A ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 32
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import re
import string
import numpy as np
import datasets
A_ : Union[str, Any] = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
A_ : Optional[Any] = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
A_ : List[Any] = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """Exact-match rate between predictions and references (0.0-100.0).

    Restored from the mangled original: ``datasets.Metric`` dispatches to
    methods named ``_info`` / ``_compute`` (both were renamed ``A``, the
    second shadowing the first), and ``_compute`` declared duplicate
    mangled parameters (a SyntaxError).
    """

    def _info(self):
        # Metric metadata plus the expected string-sequence input features.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip ignored regexes first, then apply the other normalisations
        # (case folding, punctuation removal, digit removal) to both sides.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : str = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ):
_UpperCAmelCase = '''focalnet'''
def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]:
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
a = image_size
a = patch_size
a = num_channels
a = embed_dim
a = use_conv_embed
a = hidden_sizes
a = depths
a = focal_levels
a = focal_windows
a = hidden_act
a = mlp_ratio
a = hidden_dropout_prob
a = drop_path_rate
a = use_layerscale
a = layerscale_value
a = use_post_layernorm
a = use_post_layernorm_in_modulation
a = normalize_modulator
a = initializer_range
a = layer_norm_eps
a = encoder_stride
a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
| 32
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Test fixture payload and base filename.  The function bodies below reference
# FILE_PATH / FILE_CONT ENT-style names, so the constants must carry them (the
# mangled original bound both to ``A_``).
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    """Session fixture: FILE_CONTENT compressed with zstandard, on disk.

    Fixture name grounded by its use in ``input_paths`` below; the mangled
    original's parameter hid the ``tmp_path_factory`` the body referenced.
    """
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    """Write FILE_CONTENT into the mock ``tmp://`` filesystem; return the name."""
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    """Extracting a compressed file through cached_path yields the original text.

    Parameter names restored from the body's references (``gz_file`` /
    ``xz_file`` / ``zstd_path`` fixtures plus ``tmp_path`` and the plain
    ``text_file`` reference); the mangled ``def`` had duplicate parameters.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    """Extraction directory honours both the default and custom cache settings."""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    """cached_path returns local paths unchanged (absolute and relative)."""
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    """cached_path raises FileNotFoundError for missing local paths."""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    """get_from_cache resolves a tmp:// URL and preserves the file content."""
    output_path = get_from_cache(f"""tmp://{tmpfs_file}""")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    """Remote cached_path must raise when offline mode is enabled."""
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    """http_get / http_head must raise in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    """ftp_get / ftp_head must raise in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    """fsspec_get / fsspec_head must raise in offline mode."""
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
|
def UpperCAmelCase__(head):
    """Check whether a singly linked list reads the same forwards and backwards.

    O(n) time / O(1) extra space: find the middle with fast/slow pointers,
    reverse the second half in place, then walk both halves comparing
    ``.val``.  The input list is left with its second half reversed.
    All locals were mangled to ``a`` in the original; names restored.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def UpperCAmelCase__(head):
    """Palindrome check using an explicit stack of the second half's values.

    O(n) time / O(n) space.  All locals were mangled to ``a`` in the
    original; names restored from the surviving references (``fast``,
    ``slow``, ``cur``, ``stack``).
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison: pop (reversed second half) against the front of the list
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def UpperCAmelCase__(head):
    """Palindrome check via a value -> positions map.

    For each distinct value, the positions it occupies must pair up
    symmetrically around the centre (``pos_i + pos_mirror == last_index``);
    at most one value may occur an odd number of times (the middle).
    All locals were mangled to ``a`` in the original; names restored from
    the surviving references (``d``, ``pos``, ``checksum``, ``middle``).
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 32
| 1
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
A_ : Any = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
A_ : str = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCAmelCase__ ( ):
    """Check disaggregated ROUGE-2 f-measures agree whether or not rougeL is also requested.

    NOTE(review): automated renaming has clobbered this test — both
    calculate_rouge results are bound to the local `a`, yet the final assert
    reads `no_aggregation` / `no_aggregation_just_ra` (undefined here), and
    every positional argument is the literal name `UpperCAmelCase__`.
    Restore the original identifiers before relying on it.
    """
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
    )
def UpperCAmelCase__ ( ):
    """Check that newline separation changes (improves) the rougeLsum score.

    NOTE(review): mangled — the key string and both scores are bound to `a`,
    while later lines read `k`, `score` and `score_no_sep`, which are
    undefined here.
    """
    a = "rougeLsum"
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    assert score > score_no_sep
def UpperCAmelCase__ ( ):
    """Check rouge1/rouge2/rougeL are unaffected by the newline_sep flag.

    NOTE(review): mangled — both results are bound to `a`, but the assert
    reads `score_sep` / `score_no_sep`, which are undefined here.
    """
    a = ["rouge1", "rouge2", "rougeL"]
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    assert score_sep == score_no_sep
def UpperCAmelCase__ ( ):
    """Compare ROUGE with and without newline separation on a small fixture pair.

    NOTE(review): mangled — both fixture lists are bound to the same local
    `a` (the second shadows the first), and every argument passed to
    calculate_rouge is the literal name `UpperCAmelCase__` rather than the
    fixtures / boolean flags.
    """
    a = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    a = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def UpperCAmelCase__ ( ):
    """Check that disabling newline_sep changes the rougeLsum score on <n>-separated text.

    NOTE(review): mangled — fixtures and scores are bound to `a`; the final
    assert reads `new_score` / `prev_score`, which are undefined here.
    """
    a = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    a = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] , newline_sep=UpperCAmelCase__ )["rougeLsum"]
    a = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score
def UpperCAmelCase__ ( ):
    """Smoke-test calculate_rouge_path on the wmt_en_ro sample files.

    NOTE(review): mangled — the Path and both results are bound to `a`,
    while the joinpath calls read `data_dir` (undefined here), and both
    isinstance checks receive the literal name `UpperCAmelCase__` twice.
    """
    a = Path("examples/seq2seq/test_data/wmt_en_ro" )
    a = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    a = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=UpperCAmelCase__ )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 32
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
    """Config-and-tensor factory driving the MobileBert model tests below.

    NOTE(review): automated identifier rewriting has broken this class:
    every method parameter is named `__lowerCAmelCase` (duplicate parameter
    names are a SyntaxError), each statement binds its result to the local
    `a`, and bodies read names (`parent`, `batch_size`, `input_ids`, ...)
    that are never defined.  The typing names in the signatures (List,
    Union, Optional, Dict, Tuple, Any) are also not imported in this file.
    Restore the original identifiers before use.
    """
    def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]:
        """Store the test hyper-parameters (upstream: one self.<attr> per argument)."""
        a = parent
        a = batch_size
        a = seq_length
        a = is_training
        a = use_input_mask
        a = use_token_type_ids
        a = use_labels
        a = vocab_size
        a = hidden_size
        a = embedding_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = type_vocab_size
        a = type_sequence_label_size
        a = initializer_range
        a = num_labels
        a = num_choices
        a = scope
    def A ( self : Optional[int] ) -> Optional[int]:
        """Build random input ids, masks and labels plus a config (upstream: prepare_config_and_inputs)."""
        a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a = None
        if self.use_input_mask:
            a = random_attention_mask([self.batch_size, self.seq_length] )
        a = None
        if self.use_token_type_ids:
            a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a = None
        a = None
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a = ids_tensor([self.batch_size] , self.num_choices )
        a = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A ( self : int ) -> List[str]:
        """Return a MobileBertConfig built from the tester hyper-parameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
    def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]:
        """Exercise the base MobileBertModel and check output shapes."""
        a = MobileBertModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str:
        """Exercise MobileBertForMaskedLM and check the logits shape."""
        a = MobileBertForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]:
        """Exercise MobileBertForNextSentencePrediction and check the logits shape."""
        a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]:
        """Exercise MobileBertForPreTraining and check both heads' output shapes."""
        a = MobileBertForPreTraining(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any:
        """Exercise MobileBertForQuestionAnswering and check start/end logit shapes."""
        a = MobileBertForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
        """Exercise MobileBertForSequenceClassification and check the logits shape."""
        a = self.num_labels
        a = MobileBertForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]:
        """Exercise MobileBertForTokenClassification and check the logits shape."""
        a = self.num_labels
        a = MobileBertForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]:
        """Exercise MobileBertForMultipleChoice with expanded choice inputs."""
        a = self.num_choices
        a = MobileBertForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A ( self : List[Any] ) -> Dict:
        """Split prepare_config_and_inputs() output into (config, inputs_dict) for the common tests."""
        a = self.prepare_config_and_inputs()
        # NOTE(review): this unpack binds all seven values to the same name
        # `a`; the dict below then reads input_ids / token_type_ids /
        # input_mask, which are undefined here.  Upstream unpacked into
        # named variables matching prepare_config_and_inputs()'s return.
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) = config_and_inputs
        a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-mixin test suite for the MobileBert model family.

    NOTE(review): identifier rewriting has broken this class — test methods
    call `self.model_tester` / `self.config_tester` and tester methods like
    `create_and_check_mobilebert_model`, but the setup method binds both
    helpers to the local `a` and the tester's methods are all named `A`.
    The `_prepare_for_class` override also has duplicate parameter names
    (a SyntaxError) and binds both label tensors to `a` instead of
    inserting them into `inputs_dict`.
    """
    _UpperCAmelCase = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {
            '''feature-extraction''': MobileBertModel,
            '''fill-mask''': MobileBertForMaskedLM,
            '''question-answering''': MobileBertForQuestionAnswering,
            '''text-classification''': MobileBertForSequenceClassification,
            '''token-classification''': MobileBertForTokenClassification,
            '''zero-shot''': MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = True
    def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any:
        """Add dummy labels for pretraining-mapped model classes (upstream: _prepare_for_class)."""
        a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                # NOTE(review): both tensors below are bound to `a` and then
                # discarded; upstream stored them under inputs_dict["labels"]
                # and inputs_dict["next_sentence_label"].
                a = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
                a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
        return inputs_dict
    def A ( self : Optional[int] ) -> List[Any]:
        """Create the model tester and config tester (upstream: setUp)."""
        a = MobileBertModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
    def A ( self : int ) -> Tuple:
        """Run the shared MobileBertConfig sanity checks."""
        self.config_tester.run_common_tests()
    def A ( self : str ) -> Dict:
        """Test the base model forward pass."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
    def A ( self : str ) -> str:
        """Test the masked-LM head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
    def A ( self : List[str] ) -> Dict:
        """Test the multiple-choice head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
    def A ( self : int ) -> Dict:
        """Test the next-sentence-prediction head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> int:
        """Test the pretraining (MLM + NSP) heads."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> Dict:
        """Test the question-answering head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> Optional[int]:
        """Test the sequence-classification head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
    def A ( self : int ) -> Tuple:
        """Test the token-classification head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
def UpperCAmelCase__ ( UpperCAmelCase__ :list ):
    """Build a long (int64) tensor from nested Python ints on the shared test device.

    Fixes a mangled argument: `device=` previously received the input data
    itself (which is not a valid device); the file-level `torch_device`
    imported at the top of this test module is the intended target.
    """
    return torch.tensor(
        UpperCAmelCase__ , dtype=torch.long , device=torch_device , )
A_ : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
    """Slow integration test comparing MobileBert outputs to reference values.

    NOTE(review): mangled — every result is bound to the local `a`, while
    later lines read `model`, `output`, `expected_slice`, `lower_bound`,
    `upper_bound`, `_long_tensor`, `TOLERANCE` and `__lowerCAmelCase`
    (used here as a device), none of which are defined under those names.
    """
    @slow
    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Run google/mobilebert-uncased on a fixed sentence and bound-check a logits slice."""
        a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase )
        a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            a = model(__lowerCAmelCase )[0]
        a = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , __lowerCAmelCase )
        a = torch.tensor(
            [
                [
                    [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
                    [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
                    [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
                ]
            ] , device=__lowerCAmelCase , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 32
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Return True if the code point falls in a CJK ideograph block, else False.

    Covers CJK Unified Ideographs, Extensions A-E, and the CJK
    Compatibility Ideographs blocks (including the supplement).
    """
    cjk_blocks = (
        (0x4_E00, 0x9_FFF),   # CJK Unified Ideographs
        (0x3_400, 0x4_DBF),   # Extension A
        (0x20_000, 0x2A_6DF),  # Extension B
        (0x2A_700, 0x2B_73F),  # Extension C
        (0x2B_740, 0x2B_81F),  # Extension D
        (0x2B_820, 0x2C_EAF),  # Extension E
        (0xF_900, 0xF_AFF),   # Compatibility Ideographs
        (0x2F_800, 0x2F_A1F),  # Compatibility Ideographs Supplement
    )
    return any(low <= UpperCAmelCase__ <= high for low, high in cjk_blocks)
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    """Return 1 when every character of the word is a CJK ideograph, else 0.

    Short-circuits on the first non-CJK character, mirroring the original
    early return.  NOTE: relies on a module-level `_is_chinese_char`.
    """
    all_cjk = all(_is_chinese_char(ord(ch)) for ch in UpperCAmelCase__)
    return 1 if all_cjk else 0
def UpperCAmelCase__ ( UpperCAmelCase__ :list ):
    """Return the distinct multi-character all-CJK tokens from `tokens` as a list.

    Uses a set to deduplicate, so the result order is unspecified (as in
    the original).  NOTE: relies on a module-level `is_chinese`.
    """
    distinct_words = {
        token for token in UpperCAmelCase__ if len(token) > 1 and is_chinese(token)
    }
    return list(distinct_words)
def UpperCAmelCase__ ( bert_tokens: list , chinese_word_set: set ):
    """Prefix "##" onto BERT sub-tokens that continue a segmented Chinese word.

    Scans `bert_tokens` left to right; whenever a run of tokens starting at a
    CJK character joins into a word present in `chinese_word_set`, every token
    after the first in that run is rewritten as "##<token>" (whole-word-
    masking reference format).  Mutates and returns the token list.

    Fixes mangled identifiers from the original: the two parameters shared
    one name (a SyntaxError), `start, end` were both bound to `a`, and the
    max-length comprehension measured the wrong object.
    """
    if not chinese_word_set:
        return bert_tokens
    # Longest candidate word bounds how far ahead we need to look.
    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            # Try the longest possible match first, down to 2 characters.
            span = min(end - start , max_word_len)
            for i in range(span , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def UpperCAmelCase__ ( lines: list , ltp_tokenizer , bert_tokenizer ):
    """Compute whole-word-masking reference positions for each input line.

    For every line, segments it into Chinese words with LTP and tokenizes it
    with the BERT tokenizer, then returns, per line, the indices of "##"
    sub-tokens that are single CJK characters (i.e. word continuations).

    Fixes mangled identifiers from the original: the three parameters shared
    one name (a SyntaxError) and intermediate results were bound to `a`
    while later lines read `res`, `ltp_res`, `bert_res`, etc.  The batching
    flags are restored to truthy literals, matching the runtime effect of
    the mangled truthy placeholders.
    """
    ltp_res = []
    # LTP segmentation in batches of 100 lines (cws = Chinese word segmentation).
    for i in range(0 , len(lines) , 1_00 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["cws"] ).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    # BERT tokenization in matching batches.
    for i in range(0 , len(lines) , 1_00 ):
        res = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=True , truncation=True , max_length=5_12 )
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens , chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token) ):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def UpperCAmelCase__ ( args ):
    """Read the input corpus, build WWM reference ids, and write them as JSON lines.

    Fixes mangled identifiers from the original: the parameter was not named
    `args` even though the body reads `args.*`, and each intermediate value
    was bound to `a` while later lines read `data`, `ltp_tokenizer`, etc.
    """
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    # Drop empty / whitespace-only lines to avoid delimiter like '\u2029'.
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        out_lines = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(out_lines )
if __name__ == "__main__":
    # CLI for generating whole-word-masking reference files.
    # NOTE(review): mangled — the parser is bound to A_ but the
    # add_argument calls read `parser`, and the final call invokes
    # `main(args)` although neither `main` nor `args` is defined under
    # those names in this file.  (Annotations corrected to the actual
    # argparse types.)
    A_ : argparse.ArgumentParser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
    parser.add_argument(
        '''--file_name''',
        required=False,
        type=str,
        default='''./resources/chinese-demo.txt''',
        help='''file need process, same as training data in lm''',
    )
    parser.add_argument(
        '''--ltp''',
        required=False,
        type=str,
        default='''./resources/ltp''',
        help='''resources for LTP tokenizer, usually a path''',
    )
    parser.add_argument(
        '''--bert''',
        required=False,
        type=str,
        default='''./resources/robert''',
        help='''resources for Bert tokenizer''',
    )
    parser.add_argument(
        '''--save_path''',
        required=False,
        type=str,
        default='''./resources/ref.txt''',
        help='''path to save res''',
    )
    A_ : argparse.Namespace = parser.parse_args()
    main(args)
| 32
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( UpperCAmelCase__ ):
    """Source-hygiene checks over ./datasets/**/*.py: enforce explicit open()
    encodings and forbid print statements.

    NOTE(review): mangled — the two helper methods are both named `A`
    (the second shadows the first), yet the test methods call
    `self._no_encoding_on_file_open` / `self._no_print_statements`;
    helper locals are bound to `a` while later lines read `regexp` /
    `match` / `matches`.
    """
    def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]:
        """Return a match if the file contains an open() call without an encoding/binary mode."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            a = input_file.read()
            a = regexp.search(__lowerCAmelCase )
            return match
    def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict:
        """Return a match if the file contains a real print() call (not in comments/docstrings)."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            a = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            a = regexp.finditer(__lowerCAmelCase )
            a = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None
    def A ( self : List[str] ) -> List[Any]:
        """Fail if any dataset script opens a file without specifying utf-8."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def A ( self : Tuple ) -> Union[str, Any]:
        """Fail if any dataset script contains a print statement."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 32
| 1
|
from manim import *
class _lowercase ( UpperCAmelCase__ ):
    """Manim scene animating big-model inference with CPU/GPU/disk offload.

    NOTE(review): identifier rewriting bound nearly every mobject to the
    local `a`; later lines read names that are never defined
    (cpu_left_col_base, gpu_rect, model_arr, model_cpu_arr, model_base,
    disk, key_text, a_c, fill, ...), so the scene method cannot run as
    written.  Restore the original bindings before rendering.
    """
    def A ( self : int ) -> List[Any]:
        """Build the CPU/GPU/model/disk layout, then animate per-layer weight movement."""
        # --- static layout: memory cells for CPU, GPU, model and disk ---
        a = Rectangle(height=0.5 , width=0.5 )
        a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
        a = Rectangle(height=0.2_5 , width=0.2_5 )
        a = [mem.copy() for i in range(6 )]
        a = [mem.copy() for i in range(6 )]
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = Text("CPU" , font_size=24 )
        a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowerCAmelCase )
        a = [mem.copy() for i in range(4 )]
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = Text("GPU" , font_size=24 )
        a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowerCAmelCase )
        a = [mem.copy() for i in range(6 )]
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = Text("Model" , font_size=24 )
        a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowerCAmelCase )
        # --- per-layer fill rectangles mirrored on the CPU column ---
        a = []
        a = []
        for i, rect in enumerate(__lowerCAmelCase ):
            a = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 )
            target.move_to(__lowerCAmelCase )
            model_arr.append(__lowerCAmelCase )
            a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__lowerCAmelCase )
        self.add(*__lowerCAmelCase , *__lowerCAmelCase )
        # --- disk column ---
        a = [meta_mem.copy() for i in range(6 )]
        a = [meta_mem.copy() for i in range(6 )]
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
        a = Text("Disk" , font_size=24 )
        a = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
        disk.move_to([-4, -1.2_5, 0] )
        self.add(__lowerCAmelCase , __lowerCAmelCase )
        # --- legend ---
        a = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        a = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowerCAmelCase , __lowerCAmelCase )
        a = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowerCAmelCase )
        a = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowerCAmelCase ) )
        # --- animate the input entering the first layer ---
        a = Square(0.3 )
        input.set_fill(__lowerCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 )
        self.play(Write(__lowerCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.0_2 )
        self.play(MoveToTarget(__lowerCAmelCase ) )
        self.play(FadeOut(__lowerCAmelCase ) )
        a = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        a = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowerCAmelCase , run_time=3 ) )
        a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
        self.play(
            Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        a = a.copy()
        # --- per-layer loop: shuttle weights CPU<->GPU as the input advances ---
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.0_2 , __lowerCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.0_2 )
            a = AnimationGroup(
                FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(__lowerCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    a = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
            a = a_c
            a = a_c.copy()
        # --- wrap up ---
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
        self.play(
            FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , )
        a = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) )
        self.wait()
| 32
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the InstructBLIP model package.
# NOTE(review): mangled — this map and the extension below are bound to
# A_, but _LazyModule at the bottom reads `_import_structure` (undefined
# here); upstream the resulting _LazyModule is installed via
# `sys.modules[__name__] = ...` rather than bound to a name — confirm
# against the original.  (Annotations corrected: Optional/List/Tuple were
# not imported in this file.)
A_ : dict = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed modeling symbols are only exported when torch is present.
    A_ : list = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys
    A_ : object = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 32
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the __main__ demo below: one unsorted tuple of odd-ish
# values, one of even values.  NOTE(review): the demo reads the names
# test_data_odd / test_data_even, which are undefined here — mangled
# renames.  (Annotations corrected: List/Optional were not imported in
# this file, so the originals would raise NameError at import time.)
A_ : tuple = (3, 9, -11, 0, 7, 5, 1, -1)
A_ : tuple = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowercase :
    # Singly-linked-list node.  NOTE(review): mangled — two bare class
    # attributes named `_UpperCAmelCase` (the second shadows the first)
    # are not dataclass fields, so this dataclass currently has none;
    # presumably the fields were `data` and `next_node` (the list code
    # below reads node.data / node.next_node) — confirm upstream.
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42
class _lowercase :
    """Sorted singly linked list built from an iterable of ints.

    NOTE(review): mangled — __init__ binds each new node to the local `a`
    (and calls `Node`, undefined here; the dataclass above is named
    `_lowercase`), so self.head stays None and the list remains empty;
    __iter__ binds self.head and each successor to `a` but loops on the
    undefined name `node`.
    """
    def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None:
        """Insert items in descending order so the chain ends up ascending."""
        a = None
        for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ):
            a = Node(__lowerCAmelCase , self.head )
    def __iter__( self : Union[str, Any] ) -> Iterator[int]:
        """Yield node payloads from head to tail."""
        a = self.head
        while node:
            yield node.data
            a = node.next_node
    def __len__( self : Tuple ) -> int:
        """Count nodes by exhausting the iterator."""
        return sum(1 for _ in self )
    def __str__( self : Union[str, Any] ) -> str:
        """Render as 'a -> b -> c'."""
        return " -> ".join([str(__lowerCAmelCase ) for node in self] )
def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ):
    """Merge two sorted lists by concatenating their elements into a new list.

    NOTE(review): `SortedLinkedList` is not defined under that name in this
    file (the class above is `_lowercase`); the annotations and the
    constructor call are mangled renames.
    """
    return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) )
if __name__ == "__main__":
    # Run doctests, then demo merging the two sample tuples.
    # NOTE(review): mangled — the alias is bound to A_ but the demo calls
    # `SSL(...)`, and `test_data_odd` / `test_data_even` are undefined
    # (the tuples above are bound to A_).
    import doctest
    doctest.testmod()
    A_ : Optional[Any] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( UpperCAmelCase__ ):
    # NOTE(review): UniPCMultistepScheduler test suite, damaged by an automated
    # rename:
    #  * the base class `UpperCAmelCase__` is undefined — presumably
    #    SchedulerCommonTest imported above;
    #  * every method is named `A`, so each definition shadows the previous one
    #    and unittest would collect none of them (no `test_` prefix);
    #  * several signatures reuse `__lowerCAmelCase` for both a positional
    #    parameter and `**kwargs`, which is a SyntaxError;
    #  * most local bindings were collapsed onto `a`, while later statements
    #    still read the original names (scheduler, sample, config, kwargs, ...),
    #    and many call arguments became the undefined `__lowerCAmelCase`.
    # The original identifiers must be restored before this file can run; the
    # docstrings below describe the evident intent only.
    _UpperCAmelCase = (UniPCMultistepScheduler,)
    _UpperCAmelCase = (('''num_inference_steps''', 25),)
    def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int:
        """Build a default UniPC scheduler config and apply keyword overrides."""
        a = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        # NOTE(review): `config` is never assigned — the dict above is bound to `a`.
        config.update(**__lowerCAmelCase )
        return config
    def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int:
        """Round-trip each scheduler through save_config/from_pretrained and
        check that step() outputs match over several consecutive timesteps."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        a = self.dummy_sample
        a = 0.1 * sample
        a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals
            a = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                a = scheduler_class.from_pretrained(__lowerCAmelCase )
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residuals
                a = dummy_past_residuals[: new_scheduler.config.solver_order]
            a , a = sample, sample
            for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
                a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
                a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]:
        """Same save/from_pretrained round-trip, with residuals installed only
        after set_timesteps, then a single step comparison per class."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        a = self.dummy_sample
        a = 0.1 * sample
        a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config()
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            a = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                a = scheduler_class.from_pretrained(__lowerCAmelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residual (must be after setting timesteps)
                a = dummy_past_residuals[: new_scheduler.config.solver_order]
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any:
        """Run a full 10-step denoising loop with the dummy model and return the
        final sample (builds a default scheduler when none is supplied)."""
        # NOTE(review): `scheduler` is read before any assignment — the default
        # parameter was presumably named `scheduler` before the rename.
        if scheduler is None:
            a = self.scheduler_classes[0]
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config(**__lowerCAmelCase )
        a = scheduler_class(**__lowerCAmelCase )
        a = 10
        a = self.dummy_model()
        a = self.dummy_sample_deter
        scheduler.set_timesteps(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            a = model(__lowerCAmelCase , __lowerCAmelCase )
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        return sample
    def A ( self : Any ) -> int:
        """Check that step() preserves the sample shape at two consecutive timesteps."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config()
            a = scheduler_class(**__lowerCAmelCase )
            a = self.dummy_sample
            a = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ):
                scheduler.set_timesteps(__lowerCAmelCase )
            elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ):
                a = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            a = dummy_past_residuals[: scheduler.config.solver_order]
            a = scheduler.timesteps[5]
            a = scheduler.timesteps[6]
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    def A ( self : List[str] ) -> Dict:
        """Check config hand-off between UniPC and the DPM/DEIS scheduler family:
        the loop mean must stay ~0.2464 after round-tripping through from_config."""
        a = UniPCMultistepScheduler(**self.get_scheduler_config() )
        a = self.full_loop(scheduler=__lowerCAmelCase )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
        a = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        a = DEISMultistepScheduler.from_config(scheduler.config )
        a = DPMSolverMultistepScheduler.from_config(scheduler.config )
        a = UniPCMultistepScheduler.from_config(scheduler.config )
        a = self.full_loop(scheduler=__lowerCAmelCase )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
    def A ( self : List[Any] ) -> Dict:
        """Exercise several num_train_timesteps values."""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
    def A ( self : Optional[Any] ) -> Tuple:
        """Sweep thresholding options across orders, solver types, thresholds and
        prediction types."""
        self.check_over_configs(thresholding=__lowerCAmelCase )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
    def A ( self : Optional[Any] ) -> Any:
        """Exercise both supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCAmelCase )
    def A ( self : Optional[Any] ) -> Any:
        """Sweep solver types/orders/prediction types and check the full loop
        produces finite samples."""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
                    a = self.full_loop(
                        solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
                    assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
    def A ( self : Optional[int] ) -> Any:
        """Exercise both settings of lower_order_final (the two identical calls
        presumably carried True and False before the rename)."""
        self.check_over_configs(lower_order_final=__lowerCAmelCase )
        self.check_over_configs(lower_order_final=__lowerCAmelCase )
    def A ( self : Dict ) -> str:
        """Exercise a wide range of inference-step counts at time_step 0."""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
    def A ( self : Dict ) -> int:
        """Full default loop: mean |sample| must be ~0.2464."""
        a = self.full_loop()
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
    def A ( self : Optional[int] ) -> int:
        """Full loop with v-prediction: mean |sample| must be ~0.1014."""
        a = self.full_loop(prediction_type="v_prediction" )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
    def A ( self : Union[str, Any] ) -> str:
        """Run the loop on a half-precision sample and check the output dtype.

        NOTE(review): `torch.floataa` does not exist — presumably
        `torch.float16` before the rename; confirm against the original test.
        """
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
        a = scheduler_class(**__lowerCAmelCase )
        a = 10
        a = self.dummy_model()
        a = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            a = model(__lowerCAmelCase , __lowerCAmelCase )
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.floataa
    def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict:
        """Setting timesteps to num_train_timesteps must yield all-unique timesteps."""
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 32
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger, named after this module.
A_ : List[str] = logging.get_logger(__name__)
# PIL is imported lazily: it is only needed when actual images are processed.
if is_vision_available():
    import PIL
class _lowercase ( UpperCAmelCase__ ):
    # NOTE(review): CLIP-style image processor (resize → center-crop → rescale →
    # normalize → RGB-convert pipeline).  Damaged by an automated rename:
    #  * the base class `UpperCAmelCase__` is undefined — presumably
    #    BaseImageProcessor imported above;
    #  * every method after __init__ is named `A`, so resize / center_crop /
    #    rescale / normalize are shadowed by the final definition, yet the
    #    `self.resize(...)` etc. calls in the last method expect them by name;
    #  * the signatures reuse `__lowerCAmelCase` for multiple parameters, which
    #    is a SyntaxError in Python;
    #  * locals were collapsed onto `a`, while later lines still read the
    #    original names (size, crop_size, do_resize, ...).
    # Restore original identifiers before use; docstrings describe intent only.
    _UpperCAmelCase = ['''pixel_values''']
    def __init__( self : Dict , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : List[str] , ) -> None:
        """Store resize/crop/rescale/normalize defaults (CLIP conventions:
        shortest-edge 224 resize, 224x224 crop, OpenAI CLIP mean/std)."""
        super().__init__(**__lowerCAmelCase )
        a = size if size is not None else {"shortest_edge": 224}
        a = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        a = crop_size if crop_size is not None else {"height": 224, "width": 224}
        a = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name="crop_size" )
        a = do_resize
        a = size
        a = resample
        a = do_center_crop
        a = crop_size
        a = do_rescale
        a = rescale_factor
        a = do_normalize
        a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        a = image_std if image_std is not None else OPENAI_CLIP_STD
        a = do_convert_rgb
    def A ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[str] , ) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"]."""
        a = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        a = get_resize_output_image_size(__lowerCAmelCase , size=size["shortest_edge"] , default_to_square=__lowerCAmelCase )
        return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
    def A ( self : Dict , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Any , ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        a = get_size_dict(__lowerCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(__lowerCAmelCase , size=(size["height"], size["width"]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
    def A ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : List[Any] , ) -> int:
        """Rescale pixel values by the given scale factor."""
        return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
    def A ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Union[float, List[float]] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : int , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
    def A ( self : List[Any] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : int = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : Optional[Union[float, List[float]]] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowerCAmelCase : Tuple , ) -> PIL.Image.Image:
        """Run the full preprocessing pipeline and return a BatchFeature of
        pixel_values; each per-call argument falls back to the stored default."""
        a = do_resize if do_resize is not None else self.do_resize
        a = size if size is not None else self.size
        a = get_size_dict(__lowerCAmelCase , param_name="size" , default_to_square=__lowerCAmelCase )
        a = resample if resample is not None else self.resample
        a = do_center_crop if do_center_crop is not None else self.do_center_crop
        a = crop_size if crop_size is not None else self.crop_size
        a = get_size_dict(__lowerCAmelCase , param_name="crop_size" , default_to_square=__lowerCAmelCase )
        a = do_rescale if do_rescale is not None else self.do_rescale
        a = rescale_factor if rescale_factor is not None else self.rescale_factor
        a = do_normalize if do_normalize is not None else self.do_normalize
        a = image_mean if image_mean is not None else self.image_mean
        a = image_std if image_std is not None else self.image_std
        a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        a = make_list_of_images(__lowerCAmelCase )
        if not valid_images(__lowerCAmelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            a = [convert_to_rgb(__lowerCAmelCase ) for image in images]
        # All transformations expect numpy arrays.
        a = [to_numpy_array(__lowerCAmelCase ) for image in images]
        if do_resize:
            a = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
        if do_center_crop:
            a = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
        if do_rescale:
            a = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
        if do_normalize:
            a = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
        a = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
        a = {"pixel_values": images}
        return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
| 32
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
    # NOTE(review): model-tester helper for the ConvNextV2 unit tests.  Damaged
    # by an automated rename: every helper after __init__ is named `A` (each
    # definition shadows the previous one, yet the test class below calls them
    # under their original names such as prepare_config_and_inputs), the
    # __init__ signature reuses `__lowerCAmelCase` for every parameter (a
    # SyntaxError), and locals were collapsed onto `a` while later lines still
    # read the original names.  Docstrings describe intent only.
    def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]:
        """Record the small model/test hyper-parameters used by every check."""
        a = parent
        a = batch_size
        a = image_size
        a = num_channels
        a = num_stages
        a = hidden_sizes
        a = depths
        a = is_training
        a = use_labels
        a = intermediate_size
        a = hidden_act
        a = num_labels
        a = initializer_range
        a = out_features
        a = out_indices
        a = scope
    def A ( self : Optional[Any] ) -> int:
        """Build a random pixel_values tensor, optional labels, and a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.num_labels )
        a = self.get_config()
        return config, pixel_values, labels
    def A ( self : Union[str, Any] ) -> Optional[int]:
        """Build a ConvNextVaConfig from the stored hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]:
        """Forward the base model and check the last hidden state shape."""
        a = ConvNextVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict:
        """Forward the classification head and check the logits shape."""
        a = ConvNextVaForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int:
        """Check the backbone's feature maps and channels, with and without
        explicit out_features."""
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        a = None
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def A ( self : Union[str, Any] ) -> Dict:
        """Return (config, inputs_dict) without labels."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values}
        return config, inputs_dict
    def A ( self : Dict ) -> Optional[int]:
        """Return (config, inputs_dict) including labels."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    # NOTE(review): ConvNextV2 model test suite.  Damaged by an automated
    # rename: the two mixin bases `UpperCAmelCase__` are undefined (presumably
    # ModelTesterMixin and PipelineTesterMixin imported above), every method is
    # named `A` (each shadows the previous; none carries the `test_` prefix
    # unittest collects), locals were collapsed onto `a`, and many call
    # arguments became the undefined `__lowerCAmelCase`.  Restore original
    # identifiers before running; docstrings describe intent only.
    _UpperCAmelCase = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase = (
        {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def A ( self : List[str] ) -> List[Any]:
        """Instantiate the model tester and the config tester."""
        a = ConvNextVaModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
    def A ( self : Tuple ) -> Dict:
        """Run the standard config sanity checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def A ( self : List[Any] ) -> List[Any]:
        """Intentionally empty (placeholder for common-properties check)."""
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
    def A ( self : List[Any] ) -> List[Any]:
        """Skipped: no inputs_embeds for this architecture."""
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
    def A ( self : int ) -> List[Any]:
        """Skipped: no input/output embeddings for this architecture."""
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
    def A ( self : Optional[int] ) -> Dict:
        """Skipped: no feedforward chunking for this architecture."""
        pass
    def A ( self : List[str] ) -> List[str]:
        """Train one step on each trainable model class and backprop the loss."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = True
            if model_class.__name__ in [
                *get_values(__lowerCAmelCase ),
                *get_values(__lowerCAmelCase ),
            ]:
                continue
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()
    def A ( self : Optional[int] ) -> List[str]:
        """Same as above but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a = self.model_tester.prepare_config_and_inputs_with_labels()
            a = False
            a = True
            if (
                model_class.__name__
                in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            a = model(**__lowerCAmelCase ).loss
            loss.backward()
    def A ( self : List[Any] ) -> Any:
        """Check forward() signatures start with `pixel_values`."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]
            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    def A ( self : Dict ) -> Dict:
        """Run the base-model shape check."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def A ( self : Tuple ) -> List[Any]:
        """Check hidden-state count and spatial shape, via both the explicit
        flag and the config setting."""
        def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
            a = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            a = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Run the image-classification head check."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def A ( self : Tuple ) -> List[str]:
        """Smoke-load the first pretrained checkpoint from the hub."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
    """Load and return the COCO sample image used by the integration test.

    Fix: the original body bound the opened image to ``a`` but returned the
    undefined name ``image`` (a NameError on every call); return the bound
    local instead.
    """
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return a
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    # NOTE(review): slow integration test against the convnextv2-tiny checkpoint.
    # Damaged by an automated rename: both members are named `A` (the test
    # method shadows the cached property, and the body's
    # `self.default_image_processor` no longer resolves), locals were collapsed
    # onto `a`, and several call arguments became the undefined
    # `__lowerCAmelCase` (presumably torch_device / outputs / expected values).
    @cached_property
    def A ( self : Optional[int] ) -> str:
        """Return the pretrained image processor, or None without vision deps."""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
    @slow
    def A ( self : List[str] ) -> Union[str, Any]:
        """Classify the COCO sample image and compare the first three logits
        against reference values with atol=1e-4."""
        a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # verify the logits
        a = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
| 1
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A_ : List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
A_ : List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A_ : Dict = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : str = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
A_ : Tuple = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
A_ : Optional[int] = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : Tuple = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
A_ : Optional[int] = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : str = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
A_ : Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
A_ : str = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
A_ : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
A_ : Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
A_ : int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
A_ : Optional[Any] = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : Tuple = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
A_ : int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
A_ : Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
A_ : Dict = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
A_ : str = ''''''
A_ : List[str] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
A_ : Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ : List[Any] = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
# NOTE(review): both parameters below are named `UpperCAmelCase__` — a
# duplicate-argument SyntaxError. Per the parametrize spec they were
# `readme_md` and `expected_dict`; the body also references the lost name
# `expected_dict` directly. Restore real names before this test can run.
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] , UpperCAmelCase__ :Optional[Any] ):
    '''A well-formed README string must parse into the expected dict.'''
    assert ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict() == expected_dict
# NOTE(review): duplicate-argument SyntaxError — the two parameters were
# `readme_md` and `expected_error`. The first argument of pytest.raises
# (the expected exception type, presumably ValueError) was also garbled away,
# as was the local the ReadMe instance was bound to (`readme` on the last line).
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :int ):
    '''Each malformed README must fail `validate()` with its expected message.'''
    with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path="root" ) ) ):
        a = ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ )
        readme.validate()
# NOTE(review): duplicate-argument SyntaxError — parameters were `readme_md`
# and `expected_error` per the parametrize spec; the pytest.raises exception
# type was garbled away as well.
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :Optional[Any] ):
    '''Parsing a README with duplicated headings must raise at parse time.'''
    with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ )
# NOTE(review): the single parameter `readme_md` is syntactically fine, but the
# body passes it for *three* distinct arguments — the yaml structure argument
# and the `suppress_parsing_errors=True` flag were both garbled to the same
# name. Restore those before relying on this test.
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    '''With suppress_parsing_errors, a broken README must not raise.'''
    ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ )
# NOTE(review): duplicate-argument SyntaxError — parameters were `readme_md`
# and `expected_dict`. Inside the body every local is bound to `a` (originally
# the README path, then unrelated values), and the assertion references the
# lost name `path`; the writes/reads below no longer line up as written.
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :int ):
    '''Round-trip: write the README to disk, re-read it via from_readme, compare.'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        a = Path(UpperCAmelCase__ ) / "README.md"
        with open(UpperCAmelCase__ , "w+" ) as readme_file:
            readme_file.write(UpperCAmelCase__ )
        a = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
# NOTE(review): duplicate-argument SyntaxError — parameters were `readme_md`
# and `expected_error`. Locals (`readme_filepath`, the formatted error message,
# the ReadMe instance) are all bound to `a`, so later references like `readme`
# no longer resolve; names must be restored for this test to run.
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :str ):
    '''A malformed on-disk README must fail validate() with its expected message.'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        a = Path(UpperCAmelCase__ ) / "README.md"
        with open(UpperCAmelCase__ , "w+" ) as readme_file:
            readme_file.write(UpperCAmelCase__ )
        a = expected_error.format(path=UpperCAmelCase__ )
        with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ):
            a = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ )
            readme.validate()
# NOTE(review): duplicate-argument SyntaxError — parameters were `readme_md`
# and `expected_error`; the pytest.raises exception type and the path/message
# locals were also garbled to `a`/`UpperCAmelCase__`.
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int ):
    '''Parsing a broken on-disk README must raise at parse time.'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        a = Path(UpperCAmelCase__ ) / "README.md"
        with open(UpperCAmelCase__ , "w+" ) as readme_file:
            readme_file.write(UpperCAmelCase__ )
        a = expected_error.format(path=UpperCAmelCase__ )
        with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ):
            ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ )
# NOTE(review): the single parameter `readme_md` is fine syntactically, but the
# body reuses it for the yaml structure and the `suppress_parsing_errors=True`
# flag — both garbled to the same name.
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] ):
    '''With suppress_parsing_errors, a broken on-disk README must not raise.'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        a = Path(UpperCAmelCase__ ) / "README.md"
        with open(UpperCAmelCase__ , "w+" ) as readme_file:
            readme_file.write(UpperCAmelCase__ )
        ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ )
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
    """Grey-level contrast stretcher.

    Reads an image with OpenCV, builds its histogram with matplotlib, remaps
    each grey level through the scaled cumulative distribution, and writes the
    result to ``output_data/output.jpg``.

    Fixes over the garbled original: every ``a = ...`` binding is restored to
    the ``self.<attr>`` / local it clearly stood for (the later ``self.img``,
    ``self.k``, ``self.last_list`` ... references pin the names down), and the
    three methods — all garbled to ``A``, so they shadowed each other — get the
    names the ``__main__`` block below actually calls.
    """

    def __init__(self) -> None:
        self.img = ""                 # stretched image (numpy array once loaded)
        self.original_image = ""      # untouched copy for side-by-side display
        self.last_list = []           # remap table: old grey level -> new grey level
        self.rem = 0                  # rounding remainder (see NOTE in stretch())
        self.L = 256                  # number of grey levels
        self.sk = 0                   # running cumulative distribution value
        self.k = 0                    # total pixel count
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        """Contrast-stretch the image at path *input_image* and save the result."""
        self.img = cva.imread(input_image, 0)  # load as greyscale
        self.original_image = copy.deepcopy(self.img)
        # histogram counts per grey level (plt.hist returns counts, bins, patches)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k        # probability of grey level i
            self.sk += prk             # cumulative distribution up to level i
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`,
                # and self.rem is never set nonzero beforehand — this branch is
                # dead/suspicious even in the upstream implementation.
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the lookup table
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        """Plot the histogram of the (stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        """Show output and input images for 5 seconds, then close all windows."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Fixes over the original:
    #  * os.path.basename(__file__) yields the script's *file name*, not its
    #    directory — os.path.dirname is what makes a sibling-path join work;
    #  * `ConstantStretch`, `stretcher` and `file_path` were undefined: the
    #    class in this module is (garbled to) `_lowercase`, and the two locals
    #    were bound to the placeholder `A_`.
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = _lowercase()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 32
| 1
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the fractional part of *number*, optionally rounded.

    Args:
        number: value whose decimal part is isolated (sign is preserved).
        digit_amount: if > 0, round the fractional part to this many digits;
            otherwise return the raw fractional part.

    Fixes over the original: the def repeated the parameter name
    ``UpperCAmelCase__`` twice (a duplicate-argument SyntaxError) while the body
    already referenced ``number`` and ``digit_amount``; the function is renamed
    to ``decimal_isolate``, which is what the ``__main__`` block calls.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


# Backward-compatible alias for the garbled module-level name.
UpperCAmelCase__ = decimal_isolate
if __name__ == "__main__":
    # Demo prints of the fractional-part helper with various digit counts.
    # NOTE(review): `decimal_isolate` resolves only if the function above is
    # restored to that name — the garbled module defines `UpperCAmelCase__`.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 32
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( UpperCAmelCase__ ):
    """Unconditional image-generation pipeline for score-based generative
    modelling through SDEs (variance-exploding flavour).

    NOTE(review): the two ``42`` class attributes below replaced the original
    ``unet: UNet2DModel`` / ``scheduler: ScoreSdeVeScheduler`` annotations
    during identifier garbling; they are kept verbatim.

    Fixes over the garbled original: ``__call__`` repeated the parameter name
    ``__lowerCAmelCase`` five times (a duplicate-argument SyntaxError), and the
    ``a , a = output.prev_sample, output.prev_sample_mean`` unpack destroyed
    the ``sample_mean`` local that the post-loop code reads.
    """

    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler) -> None:
        """Register the denoising U-Net and the SDE-VE scheduler as modules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample *batch_size* images by integrating the reverse-time SDE.

        Returns an ImagePipelineOutput (or a 1-tuple when return_dict=False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # initial noise, scaled to the scheduler's starting sigma
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin-dynamics corrector)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-diffusion predictor)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # the expected (mean) sample of the final step gives the image
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 32
| 1
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a ``.safetensors`` checkpoint into a diffusers
    StableDiffusionPipeline and return the patched pipeline.

    Args:
        base_model_path: path/ID of the base model in diffusers format.
        checkpoint_path: path to the ``.safetensors`` LoRA checkpoint.
        lora_prefix_unet: key prefix of UNet weights in the checkpoint.
        lora_prefix_text_encoder: key prefix of text-encoder weights.
        alpha: merging ratio, W = W0 + alpha * up @ down.

    Fixes over the garbled original: the def repeated ``UpperCAmelCase__`` for
    all five parameters (a duplicate-argument SyntaxError), and every local
    (``pipeline``, ``state_dict``, ``visited``, ``layer_infos``, ``curr_layer``,
    ``temp_name``, ``pair_keys``, the weight tensors) was bound to ``a`` while
    later lines still read the original names. ``torch.floataa`` (nonexistent)
    is assumed to have been ``torch.float32`` — TODO confirm against the
    upstream conversion script.
    """
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weights in the diffusers model
    for key in state_dict:
        # keys look like:
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha entries and already-processed pair keys are skipped
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # walk down the module tree; underscore-joined fragments are retried as
        # longer attribute names when a single fragment does not resolve
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        # collect the (up, down) pair for this LoRA layer, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight: conv (4-D) kernels are squeezed to 2-D for the matmul
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # mark both keys of the pair as processed
        for item in pair_keys:
            visited.append(item)
    return pipeline


# Backward-compatible alias for the garbled module-level name.
UpperCAmelCase__ = convert
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    # Fixes over the garbled original: every local below was bound to the
    # placeholder `A_` while the last two lines referenced the lost name `pipe`.
    pipe = convert(
        args.base_model_path,
        args.checkpoint_path,
        args.lora_prefix_unet,
        args.lora_prefix_text_encoder,
        args.alpha,
    )
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 32
|
# Doomsday dates (mod 7) for each month, January..December. The function below
# references these names; the garbled original bound all three constants to the
# placeholder `A_`, leaving DOOMSDAY_LEAP / DOOMSDAY_NOT_LEAP / WEEK_DAY_NAMES
# undefined.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Weekday index -> name (0 = Sunday ... 6 = Saturday).
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def UpperCAmelCase__ ( year: int , month: int , day: int ) -> str:
    """Return the week-day name of a Gregorian date via the Doomsday algorithm.

    Fixes over the original:
      * the def repeated ``UpperCAmelCase__`` for all three parameters (a
        duplicate-argument SyntaxError) while the body already used ``year``,
        ``month`` and ``day``;
      * the leap-year test used ``(year % 400) == 0`` — inverted: a century
        year divisible by 400 (e.g. 2000) *is* a leap year, so the non-leap
        branch must trigger on ``(year % 400) != 0``;
      * the month tables and weekday-name map are kept as locals so the
        function no longer depends on the garbled-away module constants.
    """
    # Doomsday dates (mod 7) per month, and weekday index -> name.
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7  # anchor weekday of the century
    centurian = year % 100
    centurian_m = centurian % 12
    # weekday of this year's doomsday
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # this month's doomsday date (mod 7); non-leap table unless the year is leap
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
    # Run the module's doctests (NOTE(review): none survive — the original
    # doctest examples were replaced by the placeholder docstring).
    import doctest
    doctest.testmod()
| 32
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase__ ( module ):
    """Freeze every parameter of *module* so it is excluded from backprop.

    Fixes over the garbled original: the body iterated ``module.parameters()``
    although the parameter was named ``UpperCAmelCase__``, and the loop body was
    ``a = False`` — a no-op where the standard freeze idiom sets
    ``param.requires_grad = False`` on each parameter.
    """
    for param in module.parameters():
        param.requires_grad = False
def UpperCAmelCase__ ( ):
    """Pick the best available torch device: "cuda", else "mps", else "cpu".

    Fix over the garbled original: the selected device was bound to the
    throwaway name ``a`` while the following lines tested the undefined name
    ``device``; the local is restored.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def UpperCAmelCase__ ( image ):
    """Display *image* with matplotlib, hiding both axes.

    Fixes over the garbled original: the AxesImage returned by ``plt.imshow``
    was bound to ``a`` while the next lines referenced the undefined name
    ``fig``, and the image object itself was passed to ``set_visible`` — the
    conventional axis-hiding call is ``set_visible(False)``
    (NOTE(review): restored by convention — TODO confirm against upstream).
    """
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def UpperCAmelCase__ ( ):
    """Return the current local time formatted as ``HH:MM:SS``.

    Fix over the garbled original: ``datetime.now()`` was bound to ``a`` while
    the next line read the undefined name ``current_time``; restored.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 32
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
    """Arguments pertaining to which model/config/tokenizer to fine-tune.

    NOTE(review): identifier garbling collapsed every field name to
    `_UpperCAmelCase` (originally model_name_or_path, config_name, task_type,
    tokenizer_name, use_fast, cache_dir), so only the last field survives, and
    several `default=UpperCAmelCase__` values replaced what were presumably
    None/False defaults — restore before use.
    """
    _UpperCAmelCase = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
    """Arguments pertaining to the input data for token-classification training.

    NOTE(review): field names garbled to a single `_UpperCAmelCase` (originally
    data_dir, labels, max_seq_length, overwrite_cache) — only the last binding
    survives; `default=UpperCAmelCase__` replaced None/False defaults.
    """
    _UpperCAmelCase = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    _UpperCAmelCase = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    _UpperCAmelCase = field(
        default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
    '''Train/evaluate/predict a token-classification model (CoNLL-2003 style).

    NOTE(review): heavy identifier garbling — every local is bound to `a`, so
    the `a , a , a = parser.parse_args_into_dataclasses()` unpacks destroy the
    `model_args` / `data_args` / `training_args` locals that the rest of the
    function reads, and similarly for `labels`, `label_map`, `config`,
    `tokenizer`, `model`, `trainer`, `result`, `metrics`, etc. As written this
    raises NameError immediately; the original names must be restored before
    use. Comments below describe the apparent original flow.
    '''
    # Parse CLI (or a single JSON file) into the three argument dataclasses.
    a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        a , a , a = parser.parse_args_into_dataclasses()
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )
    # Resolve the TokenClassificationTask subclass named by --task_type from tasks.py.
    a = import_module("tasks" )
    try:
        a = getattr(UpperCAmelCase__ , model_args.task_type )
        a = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task: label list, id->label map, label count.
    a = token_classification_task.get_labels(data_args.labels )
    a = dict(enumerate(UpperCAmelCase__ ) )
    a = len(UpperCAmelCase__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
    a = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    a = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
        '''Convert logits + gold label ids into per-sequence label-name lists,
        dropping positions carrying the CrossEntropyLoss ignore_index.

        NOTE(review): duplicate-argument SyntaxError here too — the parameters
        were `predictions` and `label_ids`; `preds`, `out_label_list`,
        `preds_list` and `label_map` are likewise garbled away.
        '''
        a = np.argmax(UpperCAmelCase__ , axis=2 )
        a , a = preds.shape
        a = [[] for _ in range(UpperCAmelCase__ )]
        a = [[] for _ in range(UpperCAmelCase__ )]
        for i in range(UpperCAmelCase__ ):
            for j in range(UpperCAmelCase__ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
        '''seqeval accuracy/precision/recall/F1 over aligned predictions.'''
        a , a = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
        }

    # Data collator (pad-to-multiple-of-8 only when fp16 is enabled)
    a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    a = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    a = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        a = trainer.evaluate()
        a = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(UpperCAmelCase__ )
    # Predict
    if training_args.do_predict:
        a = TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        a , a , a = trainer.predict(UpperCAmelCase__ )
        a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
        a = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
        # Save predictions
        a = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    '''Per-worker entry point for xla_spawn/TPU launchers (argument unused).

    NOTE(review): `main` is undefined at module level — the main function above
    was garbled to `UpperCAmelCase__`, so this call no longer resolves.
    '''
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the entry point above was
    # garbled to `UpperCAmelCase__`; running this script raises NameError.
    main()
| 32
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# Module-level logger (garbled: originally bound to `logger`), using the
# transformers logging wrapper imported above.
A_ : int = logging.get_logger(__name__)
# Vocab/merge/tokenizer file names and per-checkpoint download map for HerBERT.
# The tokenizer class below references these names; the garbled original bound
# every constant to the placeholder `A_`, leaving VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class _lowercase ( UpperCAmelCase__ ):
    """Fast HerBERT tokenizer (backed by HuggingFace *tokenizers*).

    Fixes over the garbled original: every method repeated the parameter name
    ``__lowerCAmelCase`` (duplicate-argument SyntaxErrors), and all four
    methods were named ``A``, so they shadowed each other and only the last
    survived; names are restored per the standard tokenizer interface
    (build_inputs_with_special_tokens / get_special_tokens_mask /
    create_token_type_ids_from_sequences / save_vocabulary).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Add CLS/SEP around one sequence, or CLS A SEP B SEP for a pair."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for CLS+A+SEP, 1 for B+SEP when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backing tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (name garbled from `logger`).
A_ : str = logging.get_logger(__name__)
# Checkpoint name -> config URL map (name garbled — presumably
# `RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP`; TODO confirm before referencing).
A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
    """Configuration for RWKV models.

    Fixes over the garbled original: ``__init__`` repeated the parameter name
    ``__lowerCAmelCase`` twelve times (a duplicate-argument SyntaxError) while
    the body already read the real names (vocab_size, context_length, ...), and
    every ``self.<attr>`` assignment was collapsed to a local ``a``. The two
    class attributes were both garbled to ``_UpperCAmelCase`` (so the second
    shadowed the first); restored to the conventional ``model_type`` /
    ``attribute_map`` names.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # fall back to hidden_size-derived widths when not given explicitly
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 32
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowercase :
    """Builds tiny Timesformer configs and dummy inputs for the model tests.

    Fixes over the garbled original: ``__init__`` repeated the parameter name
    ``__lowerCAmelCase`` for all nineteen parameters (a duplicate-argument
    SyntaxError) while the body already read the real names; every
    ``self.<attr>`` assignment was collapsed to a local ``a`` (the later
    ``self.batch_size`` / ``self.seq_length`` ... reads pin the names down);
    and all five methods were named ``A``, shadowing each other.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random video tensors."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a TimesformerConfig mirroring this tester's hyperparameters."""
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model forward pass must yield (batch, seq, hidden) hidden states."""
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        """Classification head must produce (batch, num_labels) logits."""
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common + pipeline model tests for TimeSformer.

    NOTE(review): the previous version inherited from `UpperCAmelCase__` (a
    module-level function) and collapsed every class attribute into
    `_UpperCAmelCase`, so the mixins never saw `all_model_classes` etc.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy the inputs and, when requested, add dummy classification labels."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def UpperCAmelCase__ ():
    """Download the sample 'eating spaghetti' video and return it as a list of frames.

    NOTE(review): the previous version called ``np.load`` on the function object
    itself instead of the downloaded file path, which could never work.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


# The integration test below calls this helper by its original name.
prepare_video = UpperCAmelCase__
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test running the pretrained Kinetics-400 TimeSformer checkpoint."""

    @cached_property
    def default_image_processor(self):
        # logits were tested with a normalized version of the original frames
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( SequenceFeatureExtractor ):
    """Feature extractor turning raw audio into padded log-mel spectrogram patches.

    Produces ``audio_values`` (padded spectrograms) and, optionally, an
    ``audio_mask`` marking real vs. padding patches.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of patches along the frequency axis
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a dB-scaled log-mel spectrogram, normalized to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one audio clip or a batch of clips into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 32
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure; `_LazyModule` below needs this exact mapping.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}
A_ = _import_structure  # backward-compat alias for the old name

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the previous version overwrote a module-level variable here
    # instead of extending the import structure, so these names were never exported.
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Builds a tiny MaskFormer config plus random segmentation inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        # NOTE(review): the previous version bound every argument to a throwaway
        # local instead of `self.*`, so every later `self.<attr>` read failed.
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Compose a small Swin backbone with a tiny DETR decoder."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


# The test class below refers to this helper by its original name.
MaskFormerModelTester = _lowercase
@require_torch
class _lowercase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common + pipeline model tests for MaskFormer.

    NOTE(review): the previous version inherited from `UpperCAmelCase__` (a
    module-level function) and collapsed every class attribute into
    `_UpperCAmelCase`, so the mixins never saw `all_model_classes` etc.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
A_ : int = 1E-4
def UpperCAmelCase__ ():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# The integration tests below call this helper by its original name.
prepare_img = UpperCAmelCase__
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests running pretrained MaskFormer checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # tolerance kept at 1e-4, matching the module-level TOLERANCE constant
        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4)
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 32
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : int = logging.get_logger(__name__)
class _lowercase ( BaseImageProcessor ):
    """Image processor applying optional resize -> center-crop -> rescale -> normalize.

    NOTE(review): in the original, every `__init__` argument shared the same
    mangled name (a SyntaxError) and was bound to a throwaway local instead of
    `self.*`, so `preprocess` crashed on missing attributes. The parameter and
    attribute names are restored from the `self.*` reads in `preprocess`.
    """

    # Name of the tensor this processor produces (BaseImageProcessor convention).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        # default_to_square=True is the usual convention for square center crops -- TODO confirm
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height", "width"} or {"shortest_edge"})."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            # default_to_square=False keeps the aspect ratio when resizing by shortest edge -- TODO confirm
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Tests for ``DisjunctiveConstraint`` (input validation and step-wise progression).

    NOTE(review): method names were mangled to `A` (so unittest never collected
    them) and the `stepped/completed/reset` locals were mangled to `a`; restored.
    """

    def test_input_types(self) -> None:
        """The constraint accepts a nested list of ints and rejects tensors."""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        # token_ids must be kept as a plain python list
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self) -> None:
        """One sequence being a prefix of another is ambiguous and must be rejected."""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self) -> None:
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self) -> None:
        """Branches of unequal length, plus a reset mid-way through."""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 32
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (name mangled to `A_`; conventionally `logger`).
A_ : Dict = logging.get_logger(__name__)

# Map from pretrained model id to its hosted config URL.
# NOTE(review): this second `A_` shadows the logger above — the names were mangled.
A_ : List[str] = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration for RoCBert, a BERT variant with pronunciation and shape embeddings.

    NOTE(review): the original `__init__` duplicated one mangled parameter name
    (a SyntaxError) and bound every value to a local instead of `self.*`;
    parameter and attribute names are restored from the upstream RoCBertConfig.
    """

    # Registry key used by AutoConfig.
    model_type = '''roc_bert'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=2_4858,
        concat_input=True,
        **kwargs,
    ):
        """Store the architecture hyper-parameters on the config instance."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 32
|
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if *n* uses each of the digits 1-9 exactly once.

    Bug fixed: the original converted *n* to a string into an unused local and
    then called len()/set() on the int itself, which raises TypeError.
    """
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    For a 4-digit n, concat(n, 2n) == 10_00_02 * n; for a 3-digit n,
    concat(n, 2n, 3n) == 1_00_20_03 * n. Search candidates from the top.
    Returns None only if no candidate matches (it does: 932718654).
    """
    for base_num in range(99_99, 49_99, -1):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(3_33, 99, -1):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 32
| 1
|
def solution(n: int = 2_00_00_00) -> int:
    """Project Euler 10: sum of all primes below *n*, via a sieve of Eratosthenes.

    Bug fixed: the original bound the sieve and counters to a throwaway local
    `a` while the loops read `primality_list`/`sum_of_primes` (NameError), and
    the inner sieve stride was mangled. 0 marks "prime", 1 marks "composite".

    Args:
        n: exclusive upper bound for the primes to sum.

    Returns:
        The sum of all primes strictly below *n*.
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1  # 0 is not prime
    primality_list[1] = 1  # 1 is not prime
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # cross out multiples of the prime i, starting at i*i
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 32
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Tests for TatoebaConverter; skipped unless the Tatoeba checkout is present.

    NOTE(review): the skip condition referenced a mangled undefined name — the
    imported ``DEFAULT_REPO`` is the only plausible target; the cached property
    was mangled to `A` while the tests read ``self.resolver``; restored.
    """

    @cached_property
    def resolver(self):
        """Lazily build one TatoebaConverter writing into a fresh temp dir."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """Smoke test: converting a single model pair must not raise."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """The generated model-card metadata must carry the language pair."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Duplicate copy of the TatoebaConverter tests (same fixes as the first copy).

    NOTE(review): mangled references restored — skip condition uses the imported
    ``DEFAULT_REPO``; the cached property is named ``resolver`` as the tests expect.
    """

    @cached_property
    def resolver(self):
        """Lazily build one TatoebaConverter writing into a fresh temp dir."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """Smoke test: converting a single model pair must not raise."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """The generated model-card metadata must carry the language pair."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (name mangled to `A_`; conventionally `logger`).
A_ : Any = logging.get_logger(__name__)

# Map from pretrained model id to its hosted config URL.
# NOTE(review): this second `A_` shadows the logger above — the names were mangled.
A_ : Optional[int] = {
    '''SCUT-DLVCLab/lilt-roberta-en-base''': (
        '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
    ),
}
class _lowercase ( PretrainedConfig ):
    """Configuration for LiLT (Language-independent Layout Transformer).

    NOTE(review): the original `__init__` duplicated one mangled parameter name
    (a SyntaxError) and bound every value to a local instead of `self.*`;
    parameter and attribute names are restored from the upstream LiltConfig
    (the mangled `max_ad_position_embeddings` is `max_2d_position_embeddings`).
    """

    # Registry key used by AutoConfig.
    model_type = '''lilt'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        """Store the architecture hyper-parameters on the config instance."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 32
| 1
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
A_ : Union[str, Any] = get_logger(__name__)
class _lowercase :
    """Manages extraction of compressed archives into a hashed cache directory.

    NOTE(review): methods were all mangled to `A` and attributes were bound to a
    throwaway local; names restored from the internal reads
    (`self.extract_dir`, `self.extractor`, `self._get_output_path`, `self._do_extract`).
    """

    def __init__(self, cache_dir: Optional[str] = None) -> None:
        # Root directory where extracted archives are cached.
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        """Deterministic cache path for *path* (hash of its absolute path)."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        """Extract only when forced or when no previous extraction exists."""
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract *input_path* if its format is recognized; return the output path.

        Unrecognized files are returned unchanged.
        """
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """Abstract interface every archive extractor implements.

    NOTE(review): class and method names restored from the call sites in this
    file (`Optional[BaseExtractor]`, `extractor.is_extractable`, `extractor.extract`).
    """

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """Return True if this extractor can handle the file at *path*."""
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """Extract *input_path* into *output_path*."""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base for extractors that recognize their format by a leading magic number.

    NOTE(review): names restored from call sites (`cls.magic_numbers`,
    `cls.read_magic_number`, `MagicNumberBaseExtractor.read_magic_number`).
    """

    # Subclasses list the byte prefixes identifying their format.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        """Read the first *magic_number_length* bytes of the file."""
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        """Match the file header against this extractor's magic numbers."""
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                # unreadable file -> not extractable by us
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """Extractor for tar archives, with path-traversal protection on members.

    NOTE(review): the class name is restored from the self-reference
    ``TarExtractor.safemembers`` and the registry in ``Extractor.extractors``.
    """

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only members whose resolved target stays inside *output_path*."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    """Extractor for gzip-compressed single files (magic number 1F 8B)."""

    magic_numbers = [b'''\x1F\x8B''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Decompress the stream straight into the output file.
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """Extractor for zip archives, with a stricter is_zipfile fallback check."""

    magic_numbers = [
        b'''PK\x03\x04''',
        b'''PK\x05\x06''',  # empty archive
        b'''PK\x07\x08''',  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """Extractor for xz/LZMA-compressed single files."""

    magic_numbers = [b'''\xFD\x37\x7A\x58\x5A\x00''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Decompress the stream straight into the output file.
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    """Extractor for RAR archives (requires the optional `rarfile` package)."""

    magic_numbers = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00''']  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    """Extractor for Zstandard streams (requires the optional `zstandard` package)."""

    magic_numbers = [b'''\x28\xb5\x2F\xFD''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):
    """Extractor for bzip2-compressed single files.

    NOTE(review): the class name (mangled from `Bzip2Extractor`) is kept as
    ``BzipaExtractor`` because the ``Extractor.extractors`` registry in this
    file references it under that name.
    """

    magic_numbers = [b'''\x42\x5A\x68''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Decompress the stream straight into the output file.
        with bza.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    """Extractor for 7z archives (requires the optional `py7zr` package).

    Bug fixed: the original imported the non-existent module ``pyazr``; the
    guard message and ``config.PY7ZR_AVAILABLE`` identify it as ``py7zr``.
    """

    magic_numbers = [b'''\x37\x7A\xBC\xAF\x27\x1C''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):
    """Extractor for LZ4 frames (requires the optional `lz4` package).

    Bug fixed: the original imported the non-existent module ``lza.frame``; the
    guard message and ``config.LZ4_AVAILABLE`` identify it as ``lz4.frame``.
    The class name is kept as ``LzaExtractor`` because the registry in this
    file references it under that name.
    """

    magic_numbers = [b'''\x04\x22\x4D\x18''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """Facade that infers an archive's format from its magic number and dispatches extraction.

    NOTE(review): all methods were mangled to `A` and the registry to
    `_UpperCAmelCase`; names restored from the internal calls
    (`cls.extractors`, `cls.infer_extractor_format`, `cls._read_magic_number`, ...).
    """

    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls) -> int:
        """Longest magic number among all registered magic-number extractors."""
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        """Read the file header; unreadable files yield b'' (treated as no match)."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        """Deprecated since 2.4.0 — use `infer_extractor_format` instead."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        """Return the format key ('tar', 'zip', ...) whose extractor accepts *path*, else None."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        """Extract *input_path* to *output_path*, locking against parallel extractions."""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 32
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model and save it.

    Bug fixed: the function was mangled to `UpperCAmelCase__` (so the
    `__main__` call to `convert_tf_checkpoint_to_pytorch` raised NameError)
    and its locals were bound to `a` while `config`/`model` were read.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        config_file: JSON config file describing the T5 architecture.
        pytorch_dump_path: Destination for the converted PyTorch model.
    """
    # Initialise PyTorch model from the JSON configuration
    config = TaConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 32
| 1
|
import sys
# Project Euler 8: the 1000-digit number, as one concatenated string.
# NOTE(review): the constant was mangled to `A_` while the default argument
# below reads `N`; restored.
N = (
    '''73167176531330624919225119674426574742355349194934'''
    '''96983520312774506326239578318016984801869478851843'''
    '''85861560789112949495459501737958331952853208805511'''
    '''12540698747158523863050715693290963295227443043557'''
    '''66896648950445244523161731856403098711121722383113'''
    '''62229893423380308135336276614282806444486645238749'''
    '''30358907296290491560440772390713810515859307960866'''
    '''70172427121883998797908792274921901699720888093776'''
    '''65727333001053367881220235421809751254540594752243'''
    '''52584907711670556013604839586446706324415722155397'''
    '''53697817977846174064955149290862569321978468622482'''
    '''83972241375657056057490261407972968652414535100474'''
    '''82166370484403199890008895243450658541227588666881'''
    '''16427171479924442928230863465674813919123162824586'''
    '''17866458359124566529476545682848912883142607690042'''
    '''24219022671055626321111109370544217506941658960408'''
    '''07198403850962455444362981230987879927244284909188'''
    '''84580156166097919133875499200524063689912560717606'''
    '''05886116467109405077541002256983155200055935729725'''
    '''71636269561882670428252483600823257530420752963450'''
)


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in the digit string *n*.

    Bug fixed: locals were mangled to `a` while the loop read
    `largest_product`/`product`, and the parameter was mangled while the body
    read `n`; restored.
    """
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 32
|
def UpperCAmelCase__ ( a :int , b :int ):
    """Return the bitwise AND of two non-negative ints as a '0b'-prefixed binary string.

    Bug fixed: both parameters shared the mangled name `UpperCAmelCase__`,
    which is a SyntaxError (duplicate argument); restored to `a` and `b`.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    # AND the zero-padded strings bit by bit.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32
| 1
|
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome (in-place, O(1) space).

    Bug fixed: all locals were mangled to `a` while the code read
    `fast`/`slow`/`second`/`node`/`nxt` (NameError); names restored. The three
    palindrome functions also shared one mangled name, shadowing each other.

    NOTE: the second half of the list is reversed and left detached.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def UpperCAmelCase__ ( head ):
    '''Check if a singly linked list is a palindrome using an explicit stack.

    Pushes the values of the second half onto a stack, then pops them while
    walking from the head; O(n) time, O(n) extra space.  (Parameter name
    restored — the body referenced ``head`` while the parameter was
    obfuscated, a NameError in the original.)
    '''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Compare popped values against the first half
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def UpperCAmelCase__ ( head ):
    '''Check if a singly linked list is a palindrome via value positions.

    Records the index positions of every value, then verifies that each
    value's occurrence positions are mirror-symmetric around the centre;
    at most one value may occur an odd number of times (the middle).
    (The original's locals were obfuscated to ``a`` while the body read
    ``d``/``pos``/``checksum``/``middle`` — NameErrors; names restored.)
    '''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # mirrored index pairs must sum to this
    middle = 0  # count of values occurring an odd number of times
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
| 32
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Sample inputs for the demo at the bottom of this module: two unsorted
# integer sequences (odd- and even-valued test data).
A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1)
A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowercase :
    # NOTE(review): these look like obfuscated placeholders for the linked-list
    # node's dataclass fields (presumably ``data`` and ``next_node``, given the
    # ``Node(...)`` / ``node.data`` / ``node.next_node`` uses below — confirm
    # against the original source).  As written, the second assignment simply
    # shadows the first and the dataclass has no annotated fields.
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42
class _lowercase :
def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None:
"""simple docstring"""
a = None
for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ):
a = Node(__lowerCAmelCase , self.head )
def __iter__( self : Union[str, Any] ) -> Iterator[int]:
"""simple docstring"""
a = self.head
while node:
yield node.data
a = node.next_node
def __len__( self : Tuple ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return " -> ".join([str(__lowerCAmelCase ) for node in self] )
def UpperCAmelCase__ ( sll_one : "SortedLinkedList" , sll_two : "SortedLinkedList" ):
    '''Return a new SortedLinkedList holding all elements of both inputs.

    The inputs are concatenated and re-sorted by the SortedLinkedList
    constructor.  The original declared both parameters with the same name
    (a SyntaxError); distinct names are restored, and the annotations are
    made strings so they don't require ``SortedLinkedList`` at def time.
    '''
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``SortedLinkedList`` is *assigned* here rather than used,
    # and the demo below calls ``merge_lists``/``SSL``/``test_data_odd`` —
    # names not defined under these spellings in this file.  This looks like
    # obfuscation damage and would raise NameError if run; confirm against
    # the original script.
    A_ : Optional[Any] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 32
| 1
|
# A (near-)quine: the lambda %-formats its own source string into itself and
# prints the result.  Note the printed output renders the argument with %r
# (single quotes), not the triple quotes used on this line, so the output is
# not byte-identical to this source line — presumably acceptable for the demo.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 32
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# Import the real UnCLIP pipelines only when torch and a recent-enough
# transformers (>= 4.25.0) are installed; otherwise fall back to dummy
# placeholder objects that raise a helpful error when instantiated.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCAmelCase__ ( train_dt : list , train_usr : list , train_mtch : list , test_dt : list , test_mtch : list ):
    '''Predict the next total-user value with ordinary least squares.

    Fits ``user = b0 + b1 * date + b2 * match`` on the training columns via
    the normal equations and evaluates it at the (single-element) test
    inputs; returns the absolute predicted value.  Restores the duplicate
    parameter names (a SyntaxError) and fixes the prediction term: the
    original *added* ``beta[2]`` instead of multiplying it by the match
    regressor.
    '''
    # Design matrix with an intercept column: [1, date_i, match_i]
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    # beta = (X^T X)^-1 X^T y  (normal equations)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
def UpperCAmelCase__ ( train_user : list , train_match : list , test_match : list ):
    '''Forecast the next value of ``train_user`` with a seasonal ARIMA model.

    Uses ``train_match`` as the exogenous regressor and a weekly (period-7)
    seasonal component, then predicts one step past the training range.
    Restores the duplicate parameter names (a SyntaxError) and the garbled
    ``disp=`` argument (presumably ``False`` in the original — it silences
    the optimizer output).
    '''
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # weekly seasonality
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=6_00 , method="nm" )  # "nm" = Nelder-Mead
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def UpperCAmelCase__ ( x_train : list , x_test : list , train_user : list ):
    '''Predict the next total-user value with an RBF-kernel support vector regressor.

    Fits on ``(x_train, train_user)`` and returns the first prediction for
    ``x_test``.  Restores the duplicate parameter names (a SyntaxError in
    the original) to match the call site in ``__main__``.
    '''
    regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def UpperCAmelCase__ ( train_user : list ):
    '''Return a lower outlier limit based on the interquartile range.

    Computes ``Q1 - 0.1 * (Q3 - Q1)``.  Note: the input list is sorted in
    place as a side effect (kept from the original).  Restores the
    obfuscated locals — the original read undefined names ``qa``/``iqr``.
    '''
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def UpperCAmelCase__ ( list_vote : list , actual_result : float ):
    '''Vote on whether today's actual value looks safe given the forecasts.

    A forecast counts as "safe" when it is not above the actual value and
    is within 0.1 of it in absolute magnitude; anything else is "not safe".
    Returns True when safe votes outnumber unsafe ones.  Restores the
    duplicate parameter names (a SyntaxError in the original).
    '''
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            # Forecast above the actual value: always unsafe.
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    # Rows: [total_user, total_even, days].  Names restored throughout this
    # block — the original rebound everything to ``A_`` and then read
    # undefined names such as ``data_input`` and ``normalize_df``.
    data_input = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['''total_user''', '''total_even''', '''days''']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    # The original print was missing the f-prefix, so the placeholder was
    # emitted literally instead of being interpolated.
    print(f"""Today's data is {not_str}safe.""")
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration file.
A_ : int = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL for pretrained FocalNets.
A_ : str = {
    '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ):
    '''Configuration for a FocalNet model.

    Stores the image/patch geometry, per-stage depths and focal-modulation
    settings, regularisation and initialisation options, and derives the
    stage names plus the aligned backbone output features/indices.
    '''
    # ``model_type`` identifier used by the auto classes.
    _UpperCAmelCase = '''focalnet'''
    def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]:
        """Store all hyper-parameters and derive stage names / backbone outputs."""
        super().__init__(**__lowerCAmelCase )
        a = image_size
        a = patch_size
        a = num_channels
        a = embed_dim
        a = use_conv_embed
        a = hidden_sizes
        a = depths
        a = focal_levels
        a = focal_windows
        a = hidden_act
        a = mlp_ratio
        a = hidden_dropout_prob
        a = drop_path_rate
        a = use_layerscale
        a = layerscale_value
        a = use_post_layernorm
        a = use_post_layernorm_in_modulation
        a = normalize_modulator
        a = initializer_range
        a = layer_norm_eps
        a = encoder_stride
        # One stage name per depth entry, preceded by the patch-embedding stem.
        a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        # NOTE(review): the tuple target is obfuscated to ``a , a`` here —
        # presumably ``self._out_features, self._out_indices``; confirm
        # against the original source.
        a , a = get_aligned_output_features_output_indices(
            out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
| 32
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCAmelCase__ ( ):
    '''Entry point: generate a 1024-bit RSA key pair and write it to files.

    NOTE(review): calls ``make_key_files``, which is not defined under that
    name in this file (the sibling below is obfuscated to
    ``UpperCAmelCase__``) — confirm the binding against the original module.
    '''
    print("Making key files..." )
    make_key_files("rsa" , 10_24 )
    print("Key files generation successful." )
def UpperCAmelCase__ ( key_size : int ):
    '''Generate an RSA key pair of roughly ``key_size`` bits.

    Returns ``((n, e), (n, d))`` — the public and private keys.  Restores
    the obfuscated locals: the original assigned everything to ``a`` and
    then read undefined names ``p``, ``q``, ``e`` and ``d``.
    '''
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        # Pick candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def UpperCAmelCase__ ( name : str , key_size : int ):
    '''Generate an RSA key pair and write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``.

    Refuses to overwrite existing key files and exits instead.  Fixes the
    original's ``a , a = generate_key(...)`` which bound both halves of the
    pair to the same name, discarding the public key (the later
    ``public_key[0]`` read was a NameError).
    '''
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print("\nWARNING:" )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    # NOTE(review): ``generate_key`` / ``main`` are obfuscated sibling names
    # in this file; the calls are kept as in the original.
    public_key, private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
        out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
        out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
    main()
| 32
|
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
    '''Palindrome check for a linked list by reversing the second half.

    NOTE(review): the body reads ``head``/``fast``/``slow``/``second``/
    ``node``/``nxt`` while every assignment target is obfuscated to ``a``
    and the parameter to ``UpperCAmelCase__`` — as written this raises
    NameError.  The parameter was presumably ``head``; confirm against the
    original source.
    '''
    if not head:
        return True
    # split the list to two parts
    a , a = head.next, head
    while fast and fast.next:
        a = fast.next.next
        a = slow.next
    a = slow.next
    a = None  # Don't forget here! But forget still works!
    # reverse the second part
    a = None
    while second:
        a = second.next
        a = node
        a = second
        a = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        a = node.next
        a = head.next
    return True
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    '''Palindrome check for a linked list using an explicit stack.

    NOTE(review): the body reads ``head``/``fast``/``slow``/``stack``/``cur``
    while assignment targets are obfuscated to ``a`` — NameError as written;
    the parameter was presumably ``head``.  Confirm against the original.
    '''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    a = a = a = head
    while fast and fast.next:
        a , a = fast.next.next, slow.next
    # 2. Push the second half into the stack
    a = [slow.val]
    while slow.next:
        a = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        a = cur.next
    return True
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ):
    '''Palindrome check for a linked list via mirror-symmetric value positions.

    NOTE(review): the body reads ``head``/``d``/``pos``/``checksum``/
    ``middle``/``step`` while assignment targets are obfuscated to ``a`` —
    NameError as written; the parameter was presumably ``head``.  Confirm
    against the original source.
    '''
    if not head or not head.next:
        return True
    a = {}
    a = 0
    while head:
        if head.val in d:
            d[head.val].append(UpperCAmelCase__ )
        else:
            a = [pos]
        a = head.next
        pos += 1
    a = pos - 1
    a = 0
    for v in d.values():
        if len(UpperCAmelCase__ ) % 2 != 0:
            middle += 1
        else:
            a = 0
            for i in range(0 , len(UpperCAmelCase__ ) ):
                # Mirrored index pairs of an even-count value must sum to the
                # last index; odd-count values are only tallied, not checked.
                if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
| 32
| 1
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class _lowercase ( nn.Module ):
    '''Flax module: 2x nearest-neighbour upsample followed by a 3x3 convolution.

    NOTE(review): names look obfuscated — the two class attributes are
    presumably the ``out_channels`` and ``dtype`` fields and ``A`` is the
    flax ``setup`` hook; confirm against the original diffusers source.
    '''
    _UpperCAmelCase = 42
    _UpperCAmelCase = jnp.floataa
    def A ( self : Dict ) -> Optional[int]:
        """Create the 3x3, stride-1, padding-1 convolution."""
        a = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Any , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
        """Double the spatial dims with nearest-neighbour resize, then convolve."""
        a , a , a , a = hidden_states.shape
        a = jax.image.resize(
            __lowerCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        a = self.conv(__lowerCAmelCase )
        return hidden_states
class _lowercase ( nn.Module ):
    '''Flax module: 2x spatial downsampling via a stride-2 3x3 convolution.

    NOTE(review): the two class attributes are presumably the
    ``out_channels`` and ``dtype`` fields and ``A`` the flax ``setup`` hook
    (obfuscated names); confirm against the original diffusers source.
    '''
    _UpperCAmelCase = 42
    _UpperCAmelCase = jnp.floataa
    def A ( self : Dict ) -> Any:
        """Create the 3x3, stride-2, padding-1 convolution."""
        a = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Optional[int] , __lowerCAmelCase : Any ) -> Optional[Any]:
        """Apply the strided convolution (halves height and width)."""
        a = self.conv(__lowerCAmelCase )
        return hidden_states
class _lowercase ( nn.Module ):
    '''Flax ResNet block with timestep-embedding injection.

    norm -> swish -> conv -> (+ projected time embedding) -> norm -> swish ->
    dropout -> conv, plus a residual connection (optionally through a 1x1
    shortcut conv when channel counts differ).

    NOTE(review): names are obfuscated — the class attributes are presumably
    ``in_channels``/``out_channels``/``dropout_prob``/``use_nin_shortcut``/
    ``dtype`` and ``A`` is the flax ``setup`` hook; confirm against the
    original diffusers source.  ``__call__`` also declares duplicate
    parameter names, which is a SyntaxError as written.
    '''
    _UpperCAmelCase = 42
    _UpperCAmelCase = None
    _UpperCAmelCase = 0.0
    _UpperCAmelCase = None
    _UpperCAmelCase = jnp.floataa
    def A ( self : str ) -> Any:
        """Build norms, convs, the time-embedding projection and the shortcut."""
        a = self.in_channels if self.out_channels is None else self.out_channels
        a = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        a = nn.Conv(
            __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        a = nn.Dense(__lowerCAmelCase , dtype=self.dtype )
        a = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        a = nn.Dropout(self.dropout_prob )
        a = nn.Conv(
            __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # 1x1 shortcut only when channel counts differ (or when forced).
        a = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        a = None
        if use_nin_shortcut:
            a = nn.Conv(
                __lowerCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
    def __call__( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=True ) -> Union[str, Any]:
        """Run the block on (hidden_states, temb); returns hidden + residual."""
        a = hidden_states
        a = self.norma(__lowerCAmelCase )
        a = nn.swish(__lowerCAmelCase )
        a = self.conva(__lowerCAmelCase )
        # Project the (swished) timestep embedding and broadcast over H, W.
        a = self.time_emb_proj(nn.swish(__lowerCAmelCase ) )
        a = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase , 1 ) , 1 )
        a = hidden_states + temb
        a = self.norma(__lowerCAmelCase )
        a = nn.swish(__lowerCAmelCase )
        a = self.dropout(__lowerCAmelCase , __lowerCAmelCase )
        a = self.conva(__lowerCAmelCase )
        if self.conv_shortcut is not None:
            a = self.conv_shortcut(__lowerCAmelCase )
        return hidden_states + residual
| 32
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
    '''Helper that builds small MobileBERT configs/inputs and checks each head.

    Each ``create_and_check_*``-style method (obfuscated here to ``A``)
    instantiates one MobileBERT task model on tiny shapes and asserts the
    output tensor shapes.  NOTE(review): several method signatures below
    declare duplicate parameter names (obfuscation damage — a SyntaxError
    as written); the bodies read the intended original names.
    '''
    def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]:
        """Store all tiny-model hyper-parameters used by the tests."""
        a = parent
        a = batch_size
        a = seq_length
        a = is_training
        a = use_input_mask
        a = use_token_type_ids
        a = use_labels
        a = vocab_size
        a = hidden_size
        a = embedding_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = type_vocab_size
        a = type_sequence_label_size
        a = initializer_range
        a = num_labels
        a = num_choices
        a = scope
    def A ( self : Optional[int] ) -> Optional[int]:
        """Build random input ids/masks/labels plus a config for the tests."""
        a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a = None
        if self.use_input_mask:
            a = random_attention_mask([self.batch_size, self.seq_length] )
        a = None
        if self.use_token_type_ids:
            a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a = None
        a = None
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a = ids_tensor([self.batch_size] , self.num_choices )
        a = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def A ( self : int ) -> List[str]:
        """Return a MobileBertConfig built from the stored hyper-parameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
    def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]:
        """Check output shapes of the bare MobileBertModel."""
        a = MobileBertModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
        a = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str:
        """Check logits shape of the masked-LM head."""
        a = MobileBertForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]:
        """Check logits shape of the next-sentence-prediction head."""
        a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]:
        """Check both output shapes of the pre-training (MLM + NSP) head."""
        a = MobileBertForPreTraining(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any:
        """Check start/end logits shapes of the question-answering head."""
        a = MobileBertForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
        """Check logits shape of the sequence-classification head."""
        a = self.num_labels
        a = MobileBertForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]:
        """Check logits shape of the token-classification head."""
        a = self.num_labels
        a = MobileBertForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]:
        """Check logits shape of the multiple-choice head (inputs get a choice dim)."""
        a = self.num_choices
        a = MobileBertForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        a = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def A ( self : List[Any] ) -> Dict:
        """Repackage prepare_config_and_inputs() into the kwargs dict the common tests use."""
        a = self.prepare_config_and_inputs()
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) = config_and_inputs
        a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    '''Common-mixin test suite for all MobileBERT task models.

    Each test delegates to the tester helper above; labels for pre-training
    models are zero-filled in ``_prepare_for_class``.
    '''
    # Model classes exercised by the common ModelTesterMixin tests.
    _UpperCAmelCase = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin tests.
    _UpperCAmelCase = (
        {
            '''feature-extraction''': MobileBertModel,
            '''fill-mask''': MobileBertForMaskedLM,
            '''question-answering''': MobileBertForQuestionAnswering,
            '''text-classification''': MobileBertForSequenceClassification,
            '''token-classification''': MobileBertForTokenClassification,
            '''zero-shot''': MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = True
    def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any:
        """Extend the common input dict with zero labels for pre-training models."""
        a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                a = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
                a = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
        return inputs_dict
    def A ( self : Optional[int] ) -> List[Any]:
        """Create the model tester and the config tester."""
        a = MobileBertModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
    def A ( self : int ) -> Tuple:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def A ( self : str ) -> Dict:
        """Shape-check the bare model."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
    def A ( self : str ) -> str:
        """Shape-check the masked-LM head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
    def A ( self : List[str] ) -> Dict:
        """Shape-check the multiple-choice head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
    def A ( self : int ) -> Dict:
        """Shape-check the next-sentence-prediction head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> int:
        """Shape-check the pre-training head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> Dict:
        """Shape-check the question-answering head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
    def A ( self : List[Any] ) -> Optional[int]:
        """Shape-check the sequence-classification head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
    def A ( self : int ) -> Tuple:
        """Shape-check the token-classification head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
def UpperCAmelCase__ ( values ):
    '''Build a long tensor on the globally configured test device.

    The original passed the input list itself as ``device=`` (which would
    raise at runtime); restore ``torch_device``, which this file imports
    from ``transformers.testing_utils``.
    '''
    return torch.tensor(
        values , dtype=torch.long , device=torch_device , )
# Relative tolerance used by the integration check below.
A_ : Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained MobileBERT and compare a
    3x3 slice of the output against hard-coded reference values.'''
    @slow
    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Check a known output slice of google/mobilebert-uncased within tolerance."""
        a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase )
        a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
        with torch.no_grad():
            a = model(__lowerCAmelCase )[0]
        a = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , __lowerCAmelCase )
        a = torch.tensor(
            [
                [
                    [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
                    [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
                    [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
                ]
            ] , device=__lowerCAmelCase , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 32
| 1
|
def UpperCAmelCase__ ( n : int = 10_00 ):
    '''Return the sum of all multiples of 3 or 5 below ``n`` (Project Euler 1).

    Fixes the original, which ignored its (obfuscated) parameter — the loop
    bound ``n`` was a free name — and carried an unreachable
    ``elif a % 15 == 0`` branch: any multiple of 15 is already caught by
    the ``% 3`` test, so removing it does not change behaviour.
    '''
    result = 0
    for a in range(3 , n ):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 32
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( UpperCAmelCase__ ):
    '''Repo-hygiene checks over ./datasets: every ``open(...)`` must pass an
    encoding, and no bare ``print(...)`` calls are allowed.

    NOTE(review): the base class name is obfuscated (presumably
    ``unittest.TestCase`` or a project base); confirm against the original.
    '''
    def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]:
        """Return a match when the file contains an open() call without an
        encoding/binary-mode argument, else None."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            a = input_file.read()
            a = regexp.search(__lowerCAmelCase )
        return match
    def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict:
        """Return a match for a real print() call, ignoring prints inside
        comments, strings and docstrings; None when the file is clean."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            a = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            a = regexp.finditer(__lowerCAmelCase )
        a = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def A ( self : List[str] ) -> List[Any]:
        """Fail if any dataset script opens a file without utf-8 encoding."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
    def A ( self : Tuple ) -> Union[str, Any]:
        """Fail if any dataset script contains a bare print statement."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 32
| 1
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Module-level logger plus the docstring constants the shared decorators use.
A_ : Optional[Any] = logging.get_logger(__name__)
# General docstring
A_ : List[str] = '''PoolFormerConfig'''
# Base docstring
A_ : Tuple = '''sail/poolformer_s12'''
# Expected hidden-state shape for the base checkpoint: [batch, channels, H, W].
A_ : Optional[int] = [1, 5_12, 7, 7]
# Image classification docstring
A_ : Optional[Any] = '''sail/poolformer_s12'''
A_ : Union[str, Any] = '''tabby, tabby cat'''
A_ : List[str] = [
    '''sail/poolformer_s12''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCAmelCase__ ( input , drop_prob : float = 0.0 , training : bool = False ):
    '''Drop paths (stochastic depth) per sample.

    With probability ``drop_prob`` an entire sample (first dim) is zeroed;
    surviving samples are scaled by ``1 / keep_prob`` so the expectation is
    unchanged.  Identity when ``drop_prob == 0`` or not training.  Restores
    the original's triplicated parameter name (a SyntaxError as written) —
    the body already read ``drop_prob``/``training``.
    '''
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
class _lowercase ( nn.Module ):
    '''``nn.Module`` wrapper around the per-sample stochastic-depth helper.

    NOTE(review): ``__init__`` reads ``drop_prob`` while its parameter is
    obfuscated to ``__lowerCAmelCase``, and the forward calls a module-level
    ``drop_path`` whose name is also obfuscated in this file — NameErrors as
    written; confirm against the original source.
    '''
    def __init__( self : Tuple , __lowerCAmelCase : Optional[float] = None ) -> None:
        """Store the drop probability (None disables dropping)."""
        super().__init__()
        a = drop_prob
    def A ( self : Union[str, Any] , __lowerCAmelCase : torch.Tensor ) -> torch.Tensor:
        """Apply stochastic depth (active only in training mode)."""
        return drop_path(__lowerCAmelCase , self.drop_prob , self.training )
    def A ( self : List[str] ) -> str:
        """Extra repr shown by nn.Module: the configured probability."""
        return "p={}".format(self.drop_prob )
class _lowercase ( nn.Module ):
    '''Patch-embedding module: a strided convolution plus optional norm.

    NOTE(review): ``nn.Convad`` does not exist in torch — this looks like the
    systematic digit obfuscation in this file (presumably ``nn.Conv2d``);
    confirm against the original source.
    '''
    def __init__( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict=None ) -> Optional[Any]:
        """Normalise patch/stride/padding to pairs and build conv + norm."""
        super().__init__()
        a = patch_size if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
        a = stride if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (stride, stride)
        a = padding if isinstance(__lowerCAmelCase , collections.abc.Iterable ) else (padding, padding)
        a = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase )
        # Identity when no norm layer is supplied.
        a = norm_layer(__lowerCAmelCase ) if norm_layer else nn.Identity()
    def A ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Tuple:
        """Project pixel values to patch embeddings, then normalise."""
        a = self.projection(__lowerCAmelCase )
        a = self.norm(__lowerCAmelCase )
        return embeddings
class _lowercase ( nn.GroupNorm ):
def __init__( self : Tuple , __lowerCAmelCase : int , **__lowerCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(1 , __lowerCAmelCase , **__lowerCAmelCase )
class _lowercase ( nn.Module ):
def __init__( self : int , __lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
super().__init__()
a = nn.AvgPoolad(__lowerCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__lowerCAmelCase )
def A ( self : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
return self.pool(__lowerCAmelCase ) - hidden_states
class _lowercase ( nn.Module ):
    """
    PoolFormer MLP block: 1x1 conv expansion -> activation -> 1x1 conv
    projection, with (drop-path style) dropout after each conv.
    """

    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ) -> None:
        """Build the two 1x1 convs, dropout and activation.

        Fix: the original repeated one parameter name four times (a
        SyntaxError), bound every sub-module to a throwaway local `a` (so no
        attribute was ever set) and referenced the non-existent `nn.Convad`.
        Parameter order matches the positional call in PoolFormerLayer.
        """
        super().__init__()
        self.conva = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.convb = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        # Accept either a string key into the activation table or a callable.
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def A ( self , hidden_states ):
        """Expand -> activate -> drop -> project -> drop."""
        hidden_states = self.conva(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.convb(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class _lowercase ( nn.Module ):
    """
    One PoolFormer block: a pooling token-mixer sub-block followed by an MLP
    sub-block, each with a pre-GroupNorm, optional learnable per-channel layer
    scale, and a drop-path residual connection.

    NOTE(review): `__init__` repeats a single parameter name six times (a
    SyntaxError) and binds every sub-module to a throwaway local `a`, so the
    attributes the forward pass reads (`self.pooling`, `self.output`,
    `self.before_norm`, `self.after_norm`, `self.drop_path`,
    `self.use_layer_scale`, `self.layer_scale_a`) are never set. This looks
    like mechanical renaming damage; restore the original bindings before use.
    """

    def __init__( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
        """Build pooling/MLP sub-blocks, norms, drop-path and layer-scale parameters."""
        super().__init__()
        a = PoolFormerPooling(__lowerCAmelCase )
        a = PoolFormerOutput(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        a = PoolFormerGroupNorm(__lowerCAmelCase )
        a = PoolFormerGroupNorm(__lowerCAmelCase )
        # Useful for training neural nets
        a = PoolFormerDropPath(__lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
        a = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scaling of each residual branch.
            a = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )
            a = nn.Parameter(
                config.layer_scale_init_value * torch.ones((__lowerCAmelCase) ) , requires_grad=__lowerCAmelCase )

    def A ( self : Dict , __lowerCAmelCase : Dict ) -> str:
        """Run the two residual sub-blocks; returns a 1-tuple holding the output tensor."""
        if self.use_layer_scale:
            a = self.pooling(self.before_norm(__lowerCAmelCase ) )
            # Broadcast the per-channel scale over the spatial dims.
            a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            a = hidden_states + self.drop_path(__lowerCAmelCase )
            a = ()
            a = self.output(self.after_norm(__lowerCAmelCase ) )
            a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            a = hidden_states + self.drop_path(__lowerCAmelCase )
            a = (output,) + outputs
            return outputs
        else:
            a = self.drop_path(self.pooling(self.before_norm(__lowerCAmelCase ) ) )
            # First residual connection
            a = pooling_output + hidden_states
            a = ()
            # Second residual connection inside the PoolFormerOutput block
            a = self.drop_path(self.output(self.after_norm(__lowerCAmelCase ) ) )
            a = hidden_states + layer_output
            a = (output,) + outputs
            return outputs
class _lowercase ( nn.Module ):
    """
    Stack of patch-embedding + PoolFormer-block stages driven by the config
    (one embedding and `depths[i]` blocks per stage) with a stochastic-depth
    rate that increases linearly across all blocks.

    NOTE(review): as elsewhere in this file, `__init__` binds its results to a
    throwaway local `a` where `self.config` / `self.patch_embeddings` /
    `self.block` were clearly intended — the attributes read by the forward
    pass are never set. Restore the original assignments before use.
    """

    def __init__( self : Any , __lowerCAmelCase : Dict ) -> Union[str, Any]:
        """Build per-stage embeddings and pooling blocks from the config."""
        super().__init__()
        a = config
        # stochastic depth decay rule
        a = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        a = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        a = nn.ModuleList(__lowerCAmelCase )
        # Transformer blocks
        a = []
        a = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            a = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        __lowerCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(__lowerCAmelCase ) )
        a = nn.ModuleList(__lowerCAmelCase )

    def A ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[str]=True ) -> Tuple:
        """Run every stage over `pixel_values`; optionally collect per-stage hidden states."""
        a = () if output_hidden_states else None
        a = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            a , a = layers
            # Get patch embeddings from hidden_states
            a = embedding_layer(__lowerCAmelCase )
            # Send the embeddings through the blocks
            for _, blk in enumerate(__lowerCAmelCase ):
                a = blk(__lowerCAmelCase )
                a = layer_outputs[0]
            if output_hidden_states:
                a = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase )
class _lowercase ( UpperCAmelCase__ ):
    """
    PoolFormer pre-trained-model base class: ties the family to
    `PoolFormerConfig` and provides weight initialization and gradient
    checkpointing toggling.

    NOTE(review): the four class attributes all share the name
    `_UpperCAmelCase` (only the last survives) — presumably `config_class`,
    `base_model_prefix`, `main_input_name`, `supports_gradient_checkpointing`
    in the original — and both methods read names (`module`, `value`) that do
    not match their parameters, plus `nn.Convad` is not a torch name. Confirm
    against the reference implementation before relying on this class.
    """

    _UpperCAmelCase = PoolFormerConfig
    _UpperCAmelCase = '''poolformer'''
    _UpperCAmelCase = '''pixel_values'''
    _UpperCAmelCase = True

    def A ( self : List[str] , __lowerCAmelCase : List[str] ) -> List[Any]:
        """Initialize weights: normal init for conv/linear, zeros/ones for LayerNorm."""
        if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(__lowerCAmelCase , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def A ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=False ) -> List[Any]:
        """Enable/disable gradient checkpointing on encoder modules."""
        # NOTE(review): duplicate parameter names above are a SyntaxError.
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            a = value
A_ : Any = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A_ : Optional[int] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''', UpperCAmelCase__, )
class _lowercase ( UpperCAmelCase__ ):
    """
    Bare PoolFormer encoder model (no task head).

    NOTE(review): `__init__` binds the config and encoder to a throwaway local
    `a` (so `self.config` / `self.encoder` are never set), and the forward
    signature repeats one parameter name three times (a SyntaxError) —
    mechanical renaming damage; restore before use.
    """

    def __init__( self : Any , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
        """Build the encoder and run the standard post-init weight setup."""
        super().__init__(__lowerCAmelCase )
        a = config
        a = PoolFormerEncoder(__lowerCAmelCase )
        # Initialize weights and apply final processing
        self.post_init()

    def A ( self : List[Any] ) -> Any:
        """Return the input (patch) embeddings module."""
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : int , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        """Encode `pixel_values`; returns the last hidden state (plus per-stage states)."""
        a = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a = return_dict if return_dict is not None else self.config.use_return_dict
        # Require pixel inputs explicitly instead of failing deep in the encoder.
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        a = self.encoder(
            __lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
        a = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class _lowercase ( nn.Module ):
def __init__( self : Union[str, Any] , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__()
a = nn.Linear(config.hidden_size , config.hidden_size )
def A ( self : int , __lowerCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
a = self.dense(__lowerCAmelCase )
return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''', UpperCAmelCase__, )
class _lowercase ( UpperCAmelCase__ ):
    """
    PoolFormer with a linear image-classification head over the spatially
    averaged (globally pooled) final feature map.

    NOTE(review): as elsewhere in this file, `__init__` and the forward pass
    bind results to a throwaway local `a` while later lines read the intended
    names (`self.num_labels`, `self.poolformer`, `self.norm`,
    `self.classifier`, `outputs`, `logits`, `loss`, ...), and the forward
    signature repeats one parameter name — mechanical renaming damage;
    restore before use.
    """

    def __init__( self : Dict , __lowerCAmelCase : Optional[int] ) -> Any:
        """Build the backbone, final norm and classification head."""
        super().__init__(__lowerCAmelCase )
        a = config.num_labels
        a = PoolFormerModel(__lowerCAmelCase )
        # Final norm
        a = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        a = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[torch.LongTensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        """Classify `pixel_values`; computes the appropriate loss when `labels` is given."""
        a = return_dict if return_dict is not None else self.config.use_return_dict
        a = self.poolformer(
            __lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , )
        a = outputs[0]
        # Global average pool over the spatial dims, then classify.
        a = self.classifier(self.norm(__lowerCAmelCase ).mean([-2, -1] ) )
        a = None
        if labels is not None:
            # Infer the problem type once from the label count and dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    a = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    a = "single_label_classification"
                else:
                    a = "multi_label_classification"
            if self.config.problem_type == "regression":
                a = MSELoss()
                if self.num_labels == 1:
                    a = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    a = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
            elif self.config.problem_type == "single_label_classification":
                a = CrossEntropyLoss()
                a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                a = BCEWithLogitsLoss()
                a = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
        if not return_dict:
            a = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
| 32
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> public names it re-exports.
A_ : Optional[int] = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}
# Register the torch-only modeling exports when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this list should be stored under the modeling key of the
    # import structure (e.g. _import_structure["modeling_instructblip"]);
    # binding it to `A_` also clobbers the mapping above — looks like
    # mechanical renaming damage.
    A_ : List[Any] = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]
# Static imports for type checkers; a lazy module object at runtime.
if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # NOTE(review): `_import_structure` is undefined here — the mapping above
    # was bound to `A_` instead, so this line raises NameError at import time;
    # confirm and restore the original name.
    A_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 32
| 1
|
from __future__ import annotations
def UpperCAmelCase__ ( n: int ) -> bool:
    """
    Return True if `n` is 1-9 pandigital: exactly nine digits, using each of
    the digits 1..9 once (so no 0 and no repeats).

    Fix: the original converted the number to a string but then called
    len()/set() on the int itself (the local binding was mangled), raising
    TypeError on every call.
    """
    digits = str(n )
    return len(digits ) == 9 and set(digits ) == set("123456789" )
def _is_9_pandigital( candidate: int ) -> bool:
    """Local helper: True if `candidate` uses each digit 1-9 exactly once."""
    digits = str(candidate )
    return len(digits ) == 9 and set(digits ) == set("123456789" )


def UpperCAmelCase__ ( ) -> int | None:
    """
    Project Euler 38: largest 1-9 pandigital number that is the concatenated
    product of an integer with (1, 2) or (1, 2, 3).

    For a 4-digit base n (where 2n has 5 digits), concat(n, 2n) == n * 100002;
    for a 3-digit base, concat(n, 2n, 3n) == n * 1002003. Searching bases in
    descending order returns the largest candidate first.

    Fix: the original bound each candidate to a throwaway local `a` and then
    called an undefined `is_9_pandigital` on an undefined name, raising
    NameError on the first iteration; the check is now a private helper.
    """
    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    return None


if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that does not exist here.
    print(f"""{UpperCAmelCase__() = }""")
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( UpperCAmelCase__ ):
    """
    Scheduler test suite for `UniPCMultistepScheduler`: config sweeps,
    save/from_pretrained round-trips, step-output shape checks and full
    denoising loops across solver orders/types and prediction types.

    NOTE(review): throughout this class results are bound to a throwaway
    local `a` (or `a , a = ...`) while later lines read the intended names
    (`config`, `scheduler`, `sample`, `output`, ...), several signatures
    repeat one parameter name (a SyntaxError), and the two class attributes
    share the name `_UpperCAmelCase` (presumably `scheduler_classes` /
    `forward_default_kwargs`). Mechanical renaming damage; restore before
    running.
    """

    _UpperCAmelCase = (UniPCMultistepScheduler,)
    _UpperCAmelCase = (('''num_inference_steps''', 25),)

    def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int:
        """Return a default scheduler config dict, with keyword overrides applied."""
        a = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**__lowerCAmelCase )
        return config

    def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int:
        """A saved-and-reloaded scheduler must step identically over several timesteps."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        a = self.dummy_sample
        a = 0.1 * sample
        a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals
            a = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                a = scheduler_class.from_pretrained(__lowerCAmelCase )
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residuals
                a = dummy_past_residuals[: new_scheduler.config.solver_order]
            a , a = sample, sample
            for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
                a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
                a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def A ( self : List[Any] , __lowerCAmelCase : Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]:
        """Timestep state must survive a save/from_pretrained round-trip for a single step."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        a = self.dummy_sample
        a = 0.1 * sample
        a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config()
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            a = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                a = scheduler_class.from_pretrained(__lowerCAmelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residual (must be after setting timesteps)
                a = dummy_past_residuals[: new_scheduler.config.solver_order]
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any:
        """Run a complete 10-step denoising loop with the dummy model; return the final sample."""
        if scheduler is None:
            a = self.scheduler_classes[0]
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config(**__lowerCAmelCase )
        a = scheduler_class(**__lowerCAmelCase )
        a = 10
        a = self.dummy_model()
        a = self.dummy_sample_deter
        scheduler.set_timesteps(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            a = model(__lowerCAmelCase , __lowerCAmelCase )
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        return sample

    def A ( self : Any ) -> int:
        """step() must return outputs with the sample's shape at two consecutive timesteps."""
        a = dict(self.forward_default_kwargs )
        a = kwargs.pop("num_inference_steps" , __lowerCAmelCase )
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config()
            a = scheduler_class(**__lowerCAmelCase )
            a = self.dummy_sample
            a = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ):
                scheduler.set_timesteps(__lowerCAmelCase )
            elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ):
                a = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            a = dummy_past_residuals[: scheduler.config.solver_order]
            a = scheduler.timesteps[5]
            a = scheduler.timesteps[6]
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def A ( self : List[str] ) -> Dict:
        """UniPC state must survive conversion through the other multistep schedulers' configs."""
        a = UniPCMultistepScheduler(**self.get_scheduler_config() )
        a = self.full_loop(scheduler=__lowerCAmelCase )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
        a = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        a = DEISMultistepScheduler.from_config(scheduler.config )
        a = DPMSolverMultistepScheduler.from_config(scheduler.config )
        a = UniPCMultistepScheduler.from_config(scheduler.config )
        a = self.full_loop(scheduler=__lowerCAmelCase )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3

    def A ( self : List[Any] ) -> Dict:
        """Sweep num_train_timesteps values."""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Tuple:
        """Sweep thresholding-related config combinations."""
        self.check_over_configs(thresholding=__lowerCAmelCase )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )

    def A ( self : Optional[Any] ) -> Any:
        """Sweep prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCAmelCase )

    def A ( self : Optional[Any] ) -> Any:
        """Sweep solver type/order/prediction combinations; full loops must stay finite."""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
                    a = self.full_loop(
                        solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
                    assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"

    def A ( self : Optional[int] ) -> Any:
        """Exercise both settings of lower_order_final."""
        self.check_over_configs(lower_order_final=__lowerCAmelCase )
        self.check_over_configs(lower_order_final=__lowerCAmelCase )

    def A ( self : Dict ) -> str:
        """Sweep num_inference_steps values in the forward check."""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )

    def A ( self : Dict ) -> int:
        """Full loop with the default config: pin the expected mean absolute value."""
        a = self.full_loop()
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3

    def A ( self : Optional[int] ) -> int:
        """Full loop with v-prediction: pin the expected mean absolute value."""
        a = self.full_loop(prediction_type="v_prediction" )
        a = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3

    def A ( self : Union[str, Any] ) -> str:
        """A full loop on float16 inputs must keep the sample in float16."""
        a = self.scheduler_classes[0]
        a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
        a = scheduler_class(**__lowerCAmelCase )
        a = 10
        a = self.dummy_model()
        a = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            a = model(__lowerCAmelCase , __lowerCAmelCase )
            a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.floataa

    def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict:
        """Setting timesteps to the full training range must keep them unique."""
        for scheduler_class in self.scheduler_classes:
            a = self.get_scheduler_config(**__lowerCAmelCase )
            a = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 32
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowercase ( UpperCAmelCase__ ):
    """
    Repository hygiene checks over ./datasets/**/*.py: every non-binary
    open() must pass an explicit encoding, and no print() statements are
    allowed (loggers should be used instead).

    NOTE(review): the helper methods bind their regex/contents/match results
    to a throwaway local `a` and then read `regexp` / `match` / `matches` —
    mechanical renaming damage; restore the original names before running.
    The checker method names read here (`_no_encoding_on_file_open`,
    `_no_print_statements`) also do not match the mangled helper names above.
    """

    def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]:
        """Return a regex match if the file contains an open() call without an encoding or a binary/write mode."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            a = input_file.read()
            a = regexp.search(__lowerCAmelCase )
            return match

    def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict:
        """Return the first real print( call in the file, ignoring prints inside comments and strings."""
        with open(__lowerCAmelCase , encoding="utf-8" ) as input_file:
            a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
            a = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            a = regexp.finditer(__lowerCAmelCase )
            a = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    def A ( self : List[str] ) -> List[Any]:
        """Fail if any dataset script opens a text file without an explicit encoding."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )

    def A ( self : Tuple ) -> Union[str, Any]:
        """Fail if any dataset script contains a print statement."""
        a = Path("./datasets" )
        a = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowerCAmelCase ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 32
|
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
    """
    Helper that builds small ConvNeXt-V2 configs/inputs and runs shape checks
    for the base model, the classification head and the backbone variant.

    NOTE(review): `__init__` repeats one parameter name for every argument (a
    SyntaxError) and binds each value to a throwaway local `a` instead of the
    attributes read later (`self.parent`, `self.batch_size`, ...); the other
    methods do the same with `pixel_values` / `config` / `model` / `result` —
    mechanical renaming damage; restore before use.
    """

    def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]:
        """Store the test hyper-parameters on the tester instance."""
        a = parent
        a = batch_size
        a = image_size
        a = num_channels
        a = num_stages
        a = hidden_sizes
        a = depths
        a = is_training
        a = use_labels
        a = intermediate_size
        a = hidden_act
        a = num_labels
        a = initializer_range
        a = out_features
        a = out_indices
        a = scope

    def A ( self : Optional[Any] ) -> int:
        """Create random pixel values (and labels, if enabled) plus a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a = None
        if self.use_labels:
            a = ids_tensor([self.batch_size] , self.num_labels )
        a = self.get_config()
        return config, pixel_values, labels

    def A ( self : Union[str, Any] ) -> Optional[int]:
        """Build a small ConvNextVaConfig from the stored hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]:
        """Check the base model's last-hidden-state shape (spatial dims divided by 32)."""
        a = ConvNextVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict:
        """Check the classification head's logits shape."""
        a = ConvNextVaForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int:
        """Check backbone feature maps and channels, with and without explicit out_features."""
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        a = None
        a = ConvNextVaBackbone(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        a = model(__lowerCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def A ( self : Union[str, Any] ) -> Dict:
        """Return (config, inputs_dict) for the common model tests."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values}
        return config, inputs_dict

    def A ( self : Dict ) -> Optional[int]:
        """Return (config, inputs_dict) including labels."""
        a = self.prepare_config_and_inputs()
        a , a , a = config_and_inputs
        a = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def A ( self : List[str] ) -> List[Any]:
"""simple docstring"""
a = ConvNextVaModelTester(self )
a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def A ( self : Tuple ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def A ( self : int ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def A ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def A ( self : List[str] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = True
if model_class.__name__ in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]:
continue
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
a = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
a , a = self.model_tester.prepare_config_and_inputs_with_labels()
a = False
a = True
if (
model_class.__name__
in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
a = model(**__lowerCAmelCase ).loss
loss.backward()
def A ( self : List[Any] ) -> Any:
"""Check that the first positional argument of ``forward`` is ``pixel_values``."""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCAmelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self : Dict ) -> Dict:
"""Run the tester's standard model-construction/forward check."""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self : Tuple ) -> List[Any]:
"""Verify hidden-state outputs: stage count and first feature-map shape."""
def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
a = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
# One hidden state per stage, plus the initial embedding output.
a = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self : Optional[Any] ) -> Optional[Any]:
"""Run the tester's image-classification head check."""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A ( self : Tuple ) -> List[str]:
"""Slow test: load the first published checkpoint and assert it instantiates."""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ):
    """Load and return the standard COCO test-fixture image."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
"""Integration test: run the tiny ConvNextV2 checkpoint on a fixture image."""
@cached_property
def A ( self : Optional[int] ) -> str:
"""Image processor for the checkpoint, or None when vision deps are absent."""
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def A ( self : List[str] ) -> Union[str, Any]:
"""Forward a fixture image and compare the first three logits to references."""
a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )
a = self.default_image_processor
a = prepare_img()
a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCAmelCase )
# verify the logits
a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
# Reference values recorded from a known-good run of this checkpoint.
a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 32
| 1
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
"""Audio feature extractor: converts raw mono waveforms into padded log-mel
spectrogram patches (``audio_values``) plus an optional patch-level
``audio_mask`` (1 = real data, 0 = padding)."""
_UpperCAmelCase = ['''audio_values''', '''audio_mask''']
# NOTE(review): the mutable default ``[16, 16]`` for the patch-size argument is
# shared across calls — harmless only while it is never mutated; confirm.
def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
"""Store spectrogram geometry and precompute the slaney mel filter bank."""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
a = spectrogram_length
a = num_channels
a = patch_size
# Number of frequency patches per frame: feature_size / patch width.
a = feature_size // self.patch_size[1]
a = n_fft
a = sampling_rate // hop_length_to_sampling_rate
a = sampling_rate
a = padding_value
# Precomputed mel filter bank, transposed for the spectrogram call below.
a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
"""Compute a dB log-mel spectrogram and rescale it into roughly [-1, 1]."""
a = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
# Drop the final frame, then shift/scale the dB values.
a = log_spec[:, :-1]
a = log_spec - 2_0.0
a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
"""Featurize one waveform or a batch; returns a BatchFeature with padded
``audio_values`` and (when requested) an ``audio_mask``."""
# Guard against a sampling-rate mismatch between caller and extractor.
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
# Normalize the input into a list of float32 arrays (mono only).
a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
a = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCAmelCase ):
a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
a = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
# 1 for every real patch, 0 for every padded patch, per example.
a = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
a = np.array(__lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
# Allocate the padded batch filled with padding_value, then copy each
# example into its slice.
a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
a = padded_audio_features * self.padding_value
for i in range(len(__lowerCAmelCase ) ):
a = audio_features[i]
a = feature
# return as BatchFeature
if return_attention_mask:
a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
a = {"audio_values": padded_audio_features}
a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
return encoded_inputs
| 32
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
"""Histogram-based contrast stretch over a grayscale image (OpenCV + matplotlib)."""
def __init__( self : List[str] ) -> List[str]:
"""Initialize empty image buffers and the running histogram accumulators."""
a = ""
a = ""
a = []
a = 0
# L is the number of gray levels (8-bit image).
a = 256
a = 0
a = 0
a = 0
a = 0
def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int:
"""Read the image at the given path as grayscale, remap its gray levels via
the cumulative histogram, and write the result to output_data/output.jpg."""
a = cva.imread(__lowerCAmelCase , 0 )
a = copy.deepcopy(self.img )
a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
a = np.sum(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
# Accumulate the normalized histogram into a CDF and scale by (L - 1).
a = x[i] / self.k
self.sk += prk
a = (self.L - 1) * self.sk
if self.rem != 0:
# NOTE(review): ``last % last`` is 0 for any nonzero value — this looks
# like a mangling of the original rounding step; verify upstream.
a = int(last % last )
a = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__lowerCAmelCase )
# presumably rows/cols of the image; derived from a single row's size —
# TODO confirm this matches the image shape.
a = int(np.ma.count(self.img ) / self.img[1].size )
a = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
a = self.img[j][i]
if num != self.last_list[num]:
a = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def A ( self : Any ) -> int:
"""Plot the histogram of the (possibly remapped) image."""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def A ( self : Any ) -> int:
"""Show input and output images side by side for five seconds."""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    # FIX(review): the original referenced three undefined names
    # (``ConstantStretch``, ``file_path``, ``stretcher``) left over from an
    # automated rename, so the script crashed with NameError. Bind them
    # consistently: the stretch class in this module is ``_lowercase``, and the
    # input image lives next to this file (dirname, not basename, of __file__).
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = _lowercase()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 32
| 1
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _lowercase ( unittest.TestCase ):
"""End-to-end tests for the ``accelerate launch`` CLI against bundled configs."""
# Path of accelerate's test script, resolved from the installed package.
_UpperCAmelCase = inspect.getfile(accelerate.test_utils )
_UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
_UpperCAmelCase = ['''accelerate''', '''launch''']
_UpperCAmelCase = Path.home() / '''.cache/huggingface/accelerate'''
_UpperCAmelCase = '''default_config.yaml'''
_UpperCAmelCase = config_folder / config_file
_UpperCAmelCase = config_folder / '''_default_config.yaml'''
_UpperCAmelCase = Path('''tests/test_configs''' )
@classmethod
def A ( cls : str ) -> List[Any]:
"""Stash the user's default config aside so tests run from a clean slate."""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def A ( cls : Union[str, Any] ) -> Any:
"""Restore the user's default config stashed by setup."""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def A ( self : List[Any] ) -> List[str]:
"""Launch the test script with no config file; add --multi_gpu when >1 GPU."""
a = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def A ( self : int ) -> List[Any]:
"""Launch the test script once for every YAML config under tests/test_configs."""
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=__lowerCAmelCase ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(__lowerCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def A ( self : List[Any] ) -> Tuple:
"""Run the built-in ``accelerate test`` smoke command."""
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class _lowercase ( unittest.TestCase ):
"""Tests for ``accelerate tpu-config``: each case invokes the CLI in --debug
mode and asserts the printed gcloud command line contains the expected text."""
_UpperCAmelCase = '''test-tpu'''
_UpperCAmelCase = '''us-central1-a'''
_UpperCAmelCase = '''ls'''
_UpperCAmelCase = ['''accelerate''', '''tpu-config''']
# Every generated command starts by cd-ing into this directory on the TPU VM.
_UpperCAmelCase = '''cd /usr/share'''
_UpperCAmelCase = '''tests/test_samples/test_command_file.sh'''
_UpperCAmelCase = '''Running gcloud compute tpus tpu-vm ssh'''
def A ( self : int ) -> List[Any]:
"""Single --command with zone/name passed on the CLI."""
a = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __lowerCAmelCase , )
def A ( self : Union[str, Any] ) -> List[str]:
"""Single --command with zone/name layered over an old (0.12.0) config file."""
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __lowerCAmelCase , )
def A ( self : List[str] ) -> Optional[Any]:
"""No --command: the commands stored in the config file are used."""
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=__lowerCAmelCase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __lowerCAmelCase , )
def A ( self : Optional[Any] ) -> List[str]:
"""--command on the CLI overrides the commands in the config file."""
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __lowerCAmelCase , )
def A ( self : Optional[int] ) -> List[Any]:
"""Multiple --command flags are chained in order."""
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , __lowerCAmelCase , )
def A ( self : str ) -> List[Any]:
"""--command_file: commands are read from a shell script."""
a = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __lowerCAmelCase , )
def A ( self : Optional[Any] ) -> Optional[Any]:
"""--command_file combined with CLI zone/name over an old config file."""
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __lowerCAmelCase , )
def A ( self : Union[str, Any] ) -> Any:
"""--install_accelerate prepends a pip install of the latest accelerate."""
a = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __lowerCAmelCase , )
def A ( self : Optional[Any] ) -> Optional[int]:
"""--accelerate_version pins the pip install to a specific version."""
a = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=__lowerCAmelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __lowerCAmelCase , )
| 32
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( UpperCAmelCase__ ):
"""Unconditional image-generation pipeline driven by a score-based SDE (VE)
scheduler: alternating corrector and predictor steps over a UNet."""
_UpperCAmelCase = 42
_UpperCAmelCase = 42
def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str:
"""Register the UNet and the SDE-VE scheduler with the pipeline."""
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
"""Sample images: start from scaled noise, then run corrector steps followed
by a predictor step at each timestep; returns PIL images or a raw array."""
a = self.unet.config.sample_size
a = (batch_size, 3, img_size, img_size)
a = self.unet
# Initial sample: Gaussian noise scaled by the scheduler's initial sigma.
a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
a = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCAmelCase )
self.scheduler.set_sigmas(__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# prediction step
a = model(__lowerCAmelCase , __lowerCAmelCase ).sample
a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
a , a = output.prev_sample, output.prev_sample_mean
# Use the (clamped) mean of the final step as the returned image.
a = sample_mean.clamp(0 , 1 )
a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 32
| 1
|
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    """Convert a binary string to its octal representation.

    FIX(review): the obfuscated original referenced undefined names
    (``bin_string``, ``oct_string``, ``oct_val``) and its padding ``while``
    loop never updated its condition (infinite loop). Reconstructed to the
    evident intent: left-pad to a multiple of 3 bits, then map each 3-bit
    group to one octal digit.

    Args:
        UpperCAmelCase__: non-empty string of '0'/'1' characters.

    Returns:
        The octal digits as a string (one digit per 3-bit group).

    Raises:
        ValueError: if the string is empty or contains non-binary characters.
    """
    bin_string = UpperCAmelCase__
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    # Left-pad so the length is a multiple of 3 (one octal digit per group).
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    # int(group, 2) parses each 3-bit group directly.
    return "".join(
        str(int(bin_string[index : index + 3] , 2 ) )
        for index in range(0 , len(bin_string ) , 3 )
    )
if __name__ == "__main__":
# Run this module's doctests when executed directly.
from doctest import testmod
testmod()
| 32
|
# NOTE(review): all three constants below were clobbered onto the same name
# ``A_`` by an automated rename; from the doomsday-algorithm function below,
# they appear to be the leap-year month anchors, the non-leap-year month
# anchors, and the weekday-name table — confirm against the original source.
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Optional[int] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def UpperCAmelCase__ ( year :int , month :int , day :int ):
    """Return the weekday name of a Gregorian date via the doomsday algorithm.

    FIX(review): the obfuscated original declared three parameters with the
    same name (a SyntaxError), referenced undefined locals, and used
    ``(year % 400) == 0`` to flag century years as NON-leap — inverted; a
    century year divisible by 400 (e.g. 2000) IS a leap year. The anchor
    tables and weekday names are defined locally because the module-level
    constants were all clobbered onto ``A_``.

    Args:
        year: four-digit year (YYYY).
        month: 1-12.
        day: 1-31.

    Returns:
        The weekday name, e.g. ``"Saturday"``.

    Raises:
        AssertionError: on out-of-range arguments (kept from the original).
    """
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday weekday offsets per month for leap / non-leap years.
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    week_day_names = {
        0: "Sunday",
        1: "Monday",
        2: "Tuesday",
        3: "Wednesday",
        4: "Thursday",
        5: "Friday",
        6: "Saturday",
    }
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Non-leap when not divisible by 4, or a century year NOT divisible by 400.
    day_anchor = (
        doomsday_not_leap[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else doomsday_leap[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return week_day_names[week_day]
if __name__ == "__main__":
# Run this module's doctests when executed directly.
import doctest
doctest.testmod()
| 32
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A_ : Optional[int] = get_tests_dir('''fixtures''')
A_ : Optional[Any] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
A_ : Tuple = get_tests_dir('''fixtures/dummy-config.json''')
class _lowercase ( unittest.TestCase ):
"""Tests for ``AutoFeatureExtractor``: hub/local loading, error messages,
remote-code (trust_remote_code) behavior, and custom-class registration."""
def A ( self : Optional[int] ) -> int:
"""Per-test setup (counter reset)."""
a = 0
def A ( self : Optional[Any] ) -> List[Any]:
"""Load a feature extractor straight from a hub model id."""
a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A ( self : int ) -> Any:
"""Load a feature extractor from a local preprocessor config path."""
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A ( self : Tuple ) -> List[str]:
"""config.json alone (without feature_extractor_type) must be enough to
resolve the right feature-extractor class from a local folder."""
with tempfile.TemporaryDirectory() as tmpdirname:
a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase ).to_dict()
config_dict.pop("feature_extractor_type" )
a = WavaVecaFeatureExtractor(**__lowerCAmelCase )
# save in new folder
model_config.save_pretrained(__lowerCAmelCase )
config.save_pretrained(__lowerCAmelCase )
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
# make sure private variable is not incorrectly saved
a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A ( self : Optional[Any] ) -> str:
"""Load a feature extractor from a bare config file path."""
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def A ( self : Optional[int] ) -> Any:
"""An invalid model identifier must raise with a helpful message."""
with self.assertRaisesRegex(
__lowerCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ):
a = AutoFeatureExtractor.from_pretrained("bert-base" )
def A ( self : str ) -> int:
"""An invalid revision must raise with a helpful message."""
with self.assertRaisesRegex(
__lowerCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase , revision="aaaaaa" )
def A ( self : int ) -> Any:
"""A repo without a preprocessor_config.json must raise accordingly."""
with self.assertRaisesRegex(
__lowerCAmelCase , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def A ( self : Any ) -> Optional[Any]:
"""Remote code: loading must fail unless trust_remote_code is granted, then
succeed, and a saved copy must reload to the same custom class."""
with self.assertRaises(__lowerCAmelCase ):
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCAmelCase )
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCAmelCase )
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def A ( self : Tuple ) -> List[str]:
"""Register a custom config/extractor pair, round-trip it through
save/load, and always clean the registries back up."""
try:
AutoConfig.register("custom" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
a = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__lowerCAmelCase )
a = AutoFeatureExtractor.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def A ( self : Dict ) -> List[Any]:
"""When a local class is registered for the same model type, verify which
implementation wins for each trust_remote_code setting."""
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = True
try:
AutoConfig.register("custom" , __lowerCAmelCase )
AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase )
# If remote code is not set, the default is to use local
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__lowerCAmelCase , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 32
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : int = logging.getLogger(__name__)
@dataclass
class _lowercase :
"""CLI arguments selecting the pretrained model/config/tokenizer.

NOTE(review): field names were collapsed to ``_UpperCAmelCase`` by an
automated rename; the intended names (model_name_or_path, config_name,
task_type, tokenizer_name, use_fast, cache_dir) can be read off the help
strings — confirm against the original script."""
_UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCAmelCase = field(
default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
"""CLI arguments describing the token-classification dataset.

NOTE(review): field names were collapsed to ``_UpperCAmelCase`` by an
automated rename; intended names (data_dir, labels, max_seq_length,
overwrite_cache) can be read off the help strings — confirm upstream."""
_UpperCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
_UpperCAmelCase = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
_UpperCAmelCase = field(
default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
a = import_module("tasks" )
try:
a = getattr(UpperCAmelCase__ , model_args.task_type )
a = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
a = token_classification_task.get_labels(data_args.labels )
a = dict(enumerate(UpperCAmelCase__ ) )
a = len(UpperCAmelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
a = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
a = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
a = np.argmax(UpperCAmelCase__ , axis=2 )
a , a = preds.shape
a = [[] for _ in range(UpperCAmelCase__ )]
a = [[] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
a , a = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
# Data collator
a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
a = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a = trainer.evaluate()
a = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCAmelCase__ )
# Predict
if training_args.do_predict:
a = TokenClassificationDataset(
token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
a , a , a = trainer.predict(UpperCAmelCase__ )
a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
a = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
a = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    '''Entry point for torch_xla's ``xla_spawn`` (TPU multiprocessing): the
    process-index argument is unused and execution is delegated to ``main``.

    NOTE(review): no function named ``main`` is defined in this chunk (the
    training routine above is also named ``UpperCAmelCase__``), so both calls
    below raise NameError as written -- verify the intended target.
    '''
    main()
if __name__ == "__main__":
    main()
| 32
| 1
|
# Character -> Morse symbol table (ITU-R M.1677-1 plus a few extra marks).
# fmt: off
A_ : Any = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Inverse table for decoding: Morse symbol -> character.
# NOTE(review): this comprehension reads ``MORSE_CODE_DICT`` but the table
# above was bound to ``A_`` (which this line then rebinds) -- the names appear
# to have been mangled; the codec helpers below also expect the names
# MORSE_CODE_DICT / REVERSE_DICT to exist. Verify.
A_ : int = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    """Encode a plaintext string into Morse code, one space between symbols.

    Characters are uppercased first; a character missing from
    MORSE_CODE_DICT raises KeyError.
    """
    # Fix: the original iterated over the undefined name ``message``;
    # iterate over the (uppercased) parameter instead.
    return " ".join(MORSE_CODE_DICT[char] for char in UpperCAmelCase__.upper() )
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    """Decode a space-separated Morse string back into plaintext.

    A symbol missing from REVERSE_DICT raises KeyError.
    """
    # Fix: the original split the undefined name ``message``; split the
    # parameter instead.
    return "".join(REVERSE_DICT[char] for char in UpperCAmelCase__.split() )
def UpperCAmelCase__ ( ):
    '''Demo driver: round-trips a sample message through encode and decode,
    printing each stage.

    NOTE(review): ``encrypt``/``decrypt`` are not defined under those names in
    this chunk (the codec helpers above are both named ``UpperCAmelCase__``),
    and ``print(UpperCAmelCase__ )`` prints a function object rather than the
    message -- local names appear to have been mangled; verify intended names.
    '''
    a = "Morse code here!"
    print(UpperCAmelCase__ )
    a = encrypt(UpperCAmelCase__ )
    print(UpperCAmelCase__ )
    a = decrypt(UpperCAmelCase__ )
    print(UpperCAmelCase__ )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this chunk either.
    main()
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
A_ : str = logging.get_logger(__name__)
# Canonical RWKV checkpoints mapped to their hosted config.json URLs
# (conventionally named ``RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP``).
A_ : List[Any] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
    """Configuration for an RWKV model.

    Stores the hyper-parameters needed to instantiate the architecture and
    forwards generic options (token ids, embedding tying, ...) to the
    PretrainedConfig-style parent.

    Fixes applied:
    - the original ``__init__`` repeated the parameter name
      ``__lowerCAmelCase`` for every argument, which is a SyntaxError; the
      intended names are restored from the right-hand sides of the body.
    - the body bound every value to a throwaway local ``a`` instead of
      setting attributes on ``self``.
    - both class attributes were assigned to the same name
      (``_UpperCAmelCase``), silently overwriting the first; they are the
      conventional ``model_type`` and ``attribute_map``.
    """

    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self , vocab_size=5_0277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
        """Store model hyper-parameters, deriving sensible defaults."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention width defaults to the model width when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        # Conventional 4x expansion for the feed-forward block when not given.
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 32
| 1
|
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Return True if the given integer is automorphic, i.e. its square ends
    with the number itself (5 -> 25, 76 -> 5776, 0 -> 0).

    Raises:
        TypeError: if the input is not an int.
    Negative numbers return False.
    """
    # Fix: the original tested ``isinstance(x, x)`` (the value against
    # itself) and then read an undefined name ``number``.
    if not isinstance(UpperCAmelCase__ , int ):
        a = F"""Input value of [number={UpperCAmelCase__}] must be an integer"""
        raise TypeError(UpperCAmelCase__ if False else a )
    if UpperCAmelCase__ < 0:
        return False
    number = UpperCAmelCase__
    number_square = number * number
    # Compare the trailing digits of the number and its square, one digit at
    # a time; any mismatch means the number is not automorphic.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 32
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class _lowercase ( UpperCAmelCase__ ):
    """Audio feature extractor that converts raw mono speech into log-mel
    spectrogram patches (``audio_values``) plus a patch-level attention mask
    (``audio_mask``), padded to the longest example in the batch.

    NOTE(review): throughout this class, results are bound to the throwaway
    local ``a`` while later lines read the intended names (``log_spec``,
    ``raw_speech``, ``audio_features``, ``self.patch_size``, ...), and the
    ``def`` signatures repeat the parameter name ``__lowerCAmelCase``, which
    is a SyntaxError as written -- variable/parameter names appear to have
    been mangled and should be restored from those uses.
    """
    # Output keys produced by __call__.
    _UpperCAmelCase = ['''audio_values''', '''audio_mask''']
    # NOTE(review): intended parameter names are recoverable from the body:
    # spectrogram_length, num_channels, patch_size, feature_size,
    # sampling_rate, hop_length_to_sampling_rate, n_fft, padding_value.
    def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
        """Configure patching, STFT and mel-filterbank parameters.

        NOTE(review): the assignments below discard their values into ``a``
        although later methods read ``self.patch_size``, ``self.n_fft``,
        ``self.hop_length``, ``self.mel_filters`` etc. -- presumably these
        were ``self.<name> = <name>`` assignments; verify.
        """
        super().__init__(
            feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
        a = spectrogram_length
        a = num_channels
        a = patch_size
        # Number of patches along the frequency axis.
        a = feature_size // self.patch_size[1]
        a = n_fft
        a = sampling_rate // hop_length_to_sampling_rate
        a = sampling_rate
        a = padding_value
        # Slaney-style mel filterbank up to 22.05 kHz, stored transposed.
        a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
    def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray:
        """Compute a dB-scaled, clipped log-mel spectrogram of one waveform."""
        a = spectrogram(
            __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , )
        # Drop the last frame, shift by -20 dB, then rescale/clip into [-1, 1].
        a = log_spec[:, :-1]
        a = log_spec - 2_0.0
        a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature:
        """Extract padded log-mel patch features and an attention mask for a
        batch of raw waveforms, returned as a BatchFeature.

        NOTE(review): ``np.floataa`` is not a NumPy attribute (presumably
        ``np.float32``) -- verify.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        a = is_batched_numpy or (
            isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
            a = np.asarray(__lowerCAmelCase , dtype=np.floataa )
        elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        a = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __lowerCAmelCase ):
            a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        a = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for every real patch of each example, 0 for the padded tail.
            a = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            a = np.array(__lowerCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        a = padded_audio_features * self.padding_value
        for i in range(len(__lowerCAmelCase ) ):
            a = audio_features[i]
            a = feature
        # return as BatchFeature
        if return_attention_mask:
            a = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            a = {"audio_values": padded_audio_features}
        a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
        return encoded_inputs
| 32
| 1
|
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :List[Any] ):
    '''Build a Wav2Vec2 sequence-classification model and copy the s3prl
    downstream head weights into it.

    NOTE(review): all three parameters share one name (a SyntaxError as
    written); intended roles are (base checkpoint name, hf_config,
    downstream_dict).
    NOTE(review): the four tensors below are bound to the throwaway local
    ``a`` so the returned model keeps its pretrained head -- presumably they
    should be copied into the model's projector/classifier weight and bias
    data; ``model`` / ``downstream_dict`` are read but never bound. Verify
    against the upstream s3prl conversion script.
    '''
    a = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
    a = downstream_dict["projector.weight"]
    a = downstream_dict["projector.bias"]
    a = downstream_dict["model.post_net.linear.weight"]
    a = downstream_dict["model.post_net.linear.bias"]
    return model
def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Dict ):
    '''Build a Wav2Vec2 audio-frame-classification (diarization) model and
    copy the s3prl downstream linear layer into it.

    NOTE(review): duplicate parameter names (SyntaxError) and throwaway
    ``a`` bindings as in the function above; ``model`` / ``downstream_dict``
    are read but never bound -- verify against the upstream script.
    '''
    a = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
    a = downstream_dict["model.linear.weight"]
    a = downstream_dict["model.linear.bias"]
    return model
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int , UpperCAmelCase__ :Any ):
    '''Build a Wav2Vec2 x-vector model and copy the s3prl TDNN kernels,
    utterance-level linear layers and objective weight into it.

    NOTE(review): same mangling as the converters above -- duplicate
    parameter names (SyntaxError) and every loaded tensor discarded into
    ``a``; ``hf_config`` / ``downstream_dict`` / ``model`` are read but never
    bound. Verify against the upstream s3prl conversion script.
    '''
    a = WavaVecaForXVector.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ )
    a = downstream_dict["connector.weight"]
    a = downstream_dict["connector.bias"]
    # One TDNN kernel (weight + bias) per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        a = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        a = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    a = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    a = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    a = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    a = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    a = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple , UpperCAmelCase__ :str , UpperCAmelCase__ :int , UpperCAmelCase__ :int ):
    '''Convert an s3prl checkpoint into a Hugging Face Wav2Vec2 head model,
    dispatching on the config's architecture suffix, then save the model and
    feature extractor.

    NOTE(review): the four parameters share one name (SyntaxError as
    written); intended roles per the CLI below are (base_model_name,
    config_path, checkpoint_path, model_dump_path).
    NOTE(review): locals are bound to the throwaway ``a`` while later lines
    read ``checkpoint``, ``hf_config``, ``hf_feature_extractor``, ``arch``
    and ``hf_model``; the ``convert_*`` helpers called below are not defined
    under those names in this chunk (the converters above are all named
    ``UpperCAmelCase__``). Verify the intended symbols.
    '''
    a = torch.load(UpperCAmelCase__ , map_location="cpu" )
    a = checkpoint["Downstream"]
    a = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
    a = WavaVecaFeatureExtractor.from_pretrained(
        UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ )
    a = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        a = convert_classification(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    elif arch.endswith("ForAudioFrameClassification" ):
        a = convert_diarization(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    elif arch.endswith("ForXVector" ):
        a = convert_xvector(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    # Weighted layer sum models additionally carry learned layer weights.
    if hf_config.use_weighted_layer_sum:
        a = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(UpperCAmelCase__ )
    hf_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    # CLI for the s3prl -> Hugging Face conversion defined above.
    A_ : Any = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    A_ : Any = parser.parse_args()
    # NOTE(review): ``parser`` / ``args`` are read although the objects were
    # bound to ``A_``, and ``convert_saprl_checkpoint`` is not defined under
    # that name in this chunk -- names appear to have been mangled; verify.
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 32
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Builds configs and random pixel/mask/class-label inputs for MaskFormer
    model tests, and provides shared result checks.

    NOTE(review): results throughout are bound to the throwaway local ``a``
    (including ``a = parent`` in __init__, although methods read
    ``self.parent``, ``self.batch_size``, ...), several ``def`` signatures
    repeat the parameter name ``__lowerCAmelCase`` (a SyntaxError as
    written), and most methods share the name ``A`` so only the last
    definition would survive on the class -- names appear mangled.
    """
    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """Record the test dimensions (batch size, image size, labels, ...)."""
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size
    def A ( self : Union[str, Any] ) -> Dict:
        """Random pixel values, a full pixel mask, binary mask labels and
        class labels, plus a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )
        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def A ( self : str ) -> Any:
        """A small MaskFormer config built from tiny Swin + DETR sub-configs."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def A ( self : Union[str, Any] ) -> Any:
        """Config plus the inputs dict expected by the common test mixin."""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """Check that all three hidden-state collections have expected sizes."""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """Run the base MaskFormerModel and validate output shapes."""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """Run the instance-segmentation head, with and without labels, and
        validate the logits shapes and the loss."""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )
        comm_check_on_output(__lowerCAmelCase )
        a = model(
            pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        comm_check_on_output(__lowerCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-mixin test suite for MaskFormer models.

    NOTE(review): the class attributes below are all assigned to the same
    name (``_UpperCAmelCase``), silently overwriting each other, most test
    methods share the name ``A`` (only the last survives on the class), and
    results are bound to the throwaway local ``a`` while later lines read
    the intended names (``loss``, ``outputs``, ``model``, ...) -- names
    appear mangled and should be restored.
    """
    _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def A ( self : List[str] ) -> List[Any]:
        """Set up the model tester and the config tester."""
        a = MaskFormerModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
    def A ( self : Any ) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def A ( self : Optional[Any] ) -> Optional[int]:
        """Check the base model without hidden-state outputs."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
    def A ( self : int ) -> int:
        """Check the instance-segmentation head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def A ( self : List[Any] ) -> Optional[Any]:
        """Skipped: not applicable to MaskFormer."""
        pass
    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def A ( self : str ) -> Union[str, Any]:
        """Skipped: not applicable to MaskFormer."""
        pass
    @unittest.skip(reason="MaskFormer is not a generative model" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped: not applicable to MaskFormer."""
        pass
    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped: not applicable to MaskFormer."""
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def A ( self : Optional[int] ) -> List[str]:
        """Skipped: incompatible with nn.DataParallel."""
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A ( self : List[str] ) -> Any:
        """Skipped pending a smaller common-test model."""
        pass
    def A ( self : Optional[Any] ) -> Optional[Any]:
        """Check that the forward signature starts with pixel_values."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]
            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    @slow
    def A ( self : Tuple ) -> List[Any]:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
    def A ( self : str ) -> Dict:
        """Check that a forward pass with labels produces a loss."""
        a = (self.model_tester.min_size,) * 2
        a = {
            "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
        }
        a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
    def A ( self : Union[str, Any] ) -> List[Any]:
        """Check the base model with hidden-state outputs."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
    def A ( self : List[str] ) -> Any:
        """Check that attention outputs are produced when requested."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
            a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )
    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Check that the segmentation loss backpropagates."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
        loss.backward()
    def A ( self : List[str] ) -> Union[str, Any]:
        """Check that gradients reach retained intermediate activations."""
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
        a = True
        a = True
        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()
        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
        a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        a = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowerCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Numeric tolerance used by the integration checks below
# (presumably the atol passed to torch.allclose -- verify at call sites).
A_ : int = 1E-4
def UpperCAmelCase__ ( ):
    """Load the COCO sample image used by the integration tests below."""
    # Fix: the original bound the image to a throwaway local and returned the
    # undefined name ``image``; bind and return the loaded image.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
@cached_property
def A ( self : int ) -> Optional[int]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def A ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
a = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
a = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
a = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(__lowerCAmelCase )
.eval()
)
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
# masks_queries_logits
a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A ( self : List[Any] ) -> Any:
"""simple docstring"""
a = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(__lowerCAmelCase )
.eval()
)
a = self.default_image_processor
a = prepare_img()
a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
a = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
a = model(**__lowerCAmelCase )
# masks_queries_logits
a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
a = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
a = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
    def A ( self : int ) -> Any:
        """Integration test for MaskFormer (Swin-small, COCO checkpoint) with
        segmentation targets: a forward pass that includes mask/class labels
        must produce a (non-None) training loss.

        NOTE(review): ``__lowerCAmelCase`` is referenced but not defined in this
        scope — presumably the torch device before an automated rewrite; confirm
        against the upstream test.
        """
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval()
        )
        a = self.default_image_processor
        # Two blank images with blank segmentation maps are enough to exercise the loss path.
        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
        with torch.no_grad():
            a = model(**__lowerCAmelCase )
        # Supplying labels must yield a training loss.
        self.assertTrue(outputs.loss is not None )
| 32
| 1
|
from __future__ import annotations
def UpperCAmelCase__ ( value: list[int] , weight: list[int] , capacity: int ):
    """Solve the fractional knapsack problem with the classic greedy strategy.

    Items are taken whole in order of decreasing value density (value/weight);
    the first item that no longer fits is taken fractionally and the scan stops.

    Args:
        value: value of each item.
        weight: weight of each item (parallel to ``value``; weights must be non-zero).
        capacity: total weight the knapsack can hold.

    Returns:
        A tuple ``(max_value, fractions)`` where ``fractions[i]`` is the taken
        fraction (0, 1, or a ratio in between) of item ``i``.

    Note: the original definition declared three parameters all named
    ``UpperCAmelCase__`` (a SyntaxError) and assigned every intermediate to
    ``a`` while still reading the original names — this is the reconstructed,
    working form of the same greedy algorithm.
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # Consider items in order of decreasing value density.
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            # Item fits entirely: take all of it.
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Item does not fit: take the fraction that fills the knapsack, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowercase ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint`` (generation constraints).

    NOTE(review): the methods reference ``__lowerCAmelCase`` without defining it;
    it presumably stood for distinct concrete values (e.g. ``list``,
    ``ValueError``, the local flag variable) before an automated rewrite —
    verify each use against the upstream transformers test.
    """
    def A ( self : Union[str, Any] ) -> int:
        """The constraint accepts a plain nested list of token ids and rejects tensors."""
        a = [[1, 2, 4], [1, 2, 3, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def A ( self : Tuple ) -> Dict:
        """A branch that is a strict prefix of another branch must be rejected at construction."""
        a = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__lowerCAmelCase ):
            DisjunctiveConstraint(__lowerCAmelCase ) # fails here
    def A ( self : int ) -> Any:
        """Feeding tokens 1, 2, 3 advances and then completes the [1, 2, 3] branch."""
        a = [[1, 2, 3], [1, 2, 4]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        # update() returns (stepped, completed, reset) at each token.
        a , a , a = dc.update(1 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        a = stepped is True and completed is False and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(3 )
        a = stepped is True and completed is True and reset is False
        self.assertTrue(__lowerCAmelCase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def A ( self : List[Any] ) -> List[Any]:
        """reset() clears progress and remaining() counts down as tokens arrive."""
        a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        a = DisjunctiveConstraint(__lowerCAmelCase )
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        # Start over and complete via the shorter [1, 2, 5] branch.
        dc.reset()
        a , a , a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        a , a , a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        a , a , a = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 32
| 1
|
def UpperCAmelCase__ ( UpperCAmelCase__ :bytes ):
    """Encode *UpperCAmelCase__* as an uppercase base16 (hex) string.

    RFC 3548 section 6 specifies the uppercase alphabet for base16.

    Fix: the original comprehension called ``hex()`` on the whole bytes object
    instead of each byte (a TypeError); ``bytes.hex()`` performs the per-byte
    conversion correctly in one call.
    """
    return UpperCAmelCase__.hex().upper()
def UpperCAmelCase__ ( UpperCAmelCase__ :str ):
    """Decode an uppercase base16 (hex) string back into bytes.

    Raises:
        ValueError: if the input has an odd number of digits, or contains
            characters outside the uppercase hex alphabet ``0123456789ABCDEF``.

    Fix: the original body indexed an undefined name ``data`` (NameError);
    the validated input is now decoded with ``bytes.fromhex``.
    """
    if (len(UpperCAmelCase__ ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(UpperCAmelCase__ ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # Every two hexadecimal digits (= one byte) are converted by bytes.fromhex.
    return bytes.fromhex(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__ :int ):
    """Return True if the decimal digits of the number are exactly 1-9, each once.

    Fix: the original converted the number to a string but then applied
    ``len``/``set`` to the int argument (a TypeError, since the converted
    string was assigned to a lost variable name).
    """
    digits = str(UpperCAmelCase__ )
    return len(digits ) == 9 and set(digits ) == set("123456789" )
def UpperCAmelCase__ ( ):
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Checks 4-digit bases ``n`` (concatenating ``n`` and ``2n`` gives
    ``n * 100002``) from the top down, then 3-digit bases (concatenating
    ``n``, ``2n`` and ``3n`` gives ``n * 1002003``). Returns the first —
    and therefore largest — pandigital candidate, or None if none exists.

    Fix: the original called ``is_9_pandigital`` and returned ``candidate``,
    both undefined names in this module; the pandigital check is inlined as a
    local helper and the candidate is bound explicitly.
    """
    def _is_9_pandigital(num: int ) -> bool:
        # Local copy of the 1-9 pandigital check so this function is self-contained
        # (the module-level helper was shadowed by this function's own name).
        digits = str(num )
        return len(digits ) == 9 and set(digits ) == set("123456789" )

    for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if _is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 32
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( _lowerCamelCase, unittest.TestCase ):
    """Tokenizer test suite for CANINE (character-level tokenizer).

    NOTE(review): many call sites below reference ``A__`` while the method
    signatures declare ``**__lowerCAmelCase`` (or nothing) — ``A__`` is
    undefined as written and presumably stood for the real arguments before
    an automated rewrite; verify each use against the upstream CANINE
    tokenizer tests before relying on this file.
    """
    _UpperCAmelCase = CanineTokenizer
    _UpperCAmelCase = False
    def A ( self : List[Any] ) -> str:
        """Save a fresh CanineTokenizer into the mixin's tmp dir."""
        super().setUp()
        a = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def A ( self : int ) -> str:
        """Pretrained reference tokenizer (google/canine-s)."""
        return CanineTokenizer.from_pretrained("google/canine-s" )
    def A ( self : Optional[int] , **__lowerCAmelCase : int ) -> CanineTokenizer:
        """Reload the tokenizer saved in setUp, capped at model_max_length 1024."""
        a = self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
        a = 1024
        return tokenizer
    @require_torch
    def A ( self : List[Any] ) -> Union[str, Any]:
        """Batch encoding pads to the longest sequence; ids are raw code points plus CLS/SEP sentinels."""
        a = self.canine_tokenizer
        a = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        a = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        a = tokenizer(A__ , padding=A__ , return_tensors="pt" )
        self.assertIsInstance(A__ , A__ )
        a = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(A__ , A__ )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
    @require_torch
    def A ( self : List[Any] ) -> List[str]:
        """Encoding returns input_ids, attention_mask and token_type_ids."""
        a = self.canine_tokenizer
        a = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        a = tokenizer(A__ , padding=A__ , return_tensors="pt" )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids" , A__ )
        self.assertIn("attention_mask" , A__ )
        self.assertIn("token_type_ids" , A__ )
    @require_torch
    def A ( self : Dict ) -> Union[str, Any]:
        """Targets are padded/truncated to max_length=32."""
        a = self.canine_tokenizer
        a = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        a = tokenizer(
            text_target=A__ , max_length=32 , padding="max_length" , truncation=A__ , return_tensors="pt" )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
    def A ( self : int ) -> Optional[Any]:
        """Round-trip through save_pretrained/from_pretrained, including added special tokens and model_max_length."""
        a = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        a = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                a = tempfile.mkdtemp()
                a = " He is very happy, UNwant\u00E9d,running"
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                tokenizer.save_pretrained(A__ )
                a = tokenizer.__class__.from_pretrained(A__ )
                a = after_tokenizer.encode(A__ , add_special_tokens=A__ )
                self.assertListEqual(A__ , A__ )
                shutil.rmtree(A__ )
        a = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                a = tempfile.mkdtemp()
                a = " He is very happy, UNwant\u00E9d,running"
                a = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                a = chr(0Xe_007 )
                additional_special_tokens.append(A__ )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                tokenizer.save_pretrained(A__ )
                a = tokenizer.__class__.from_pretrained(A__ )
                a = after_tokenizer.encode(A__ , add_special_tokens=A__ )
                self.assertListEqual(A__ , A__ )
                self.assertIn(A__ , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                a = tokenizer.__class__.from_pretrained(A__ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(A__ )
    def A ( self : Dict ) -> Optional[Any]:
        """A newly added cls_token encodes to one id and is skipped on decode."""
        a = self.get_tokenizers(do_lower_case=A__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                a , a = self.get_clean_sequence(A__ )
                # a special token for Canine can be defined as follows:
                a = 0Xe_005
                a = chr(A__ )
                tokenizer.add_special_tokens({"cls_token": special_token} )
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                self.assertEqual(len(A__ ) , 1 )
                a = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=A__ )
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                self.assertEqual(A__ , input_encoded + special_token_id )
                a = tokenizer.decode(A__ , skip_special_tokens=A__ )
                self.assertTrue(special_token not in decoded )
    def A ( self : Optional[int] ) -> Union[str, Any]:
        """Tokens added via add_tokens and add_special_tokens both tokenize to a single token."""
        a = self.get_tokenizers(do_lower_case=A__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                a = chr(0Xe_005 )
                a = chr(0Xe_006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=A__ )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
                a = tokenizer.tokenize(A__ )
                a = tokenizer.tokenize(A__ )
                self.assertEqual(len(A__ ) , 1 )
                self.assertEqual(len(A__ ) , 1 )
                self.assertEqual(token_a[0] , A__ )
                self.assertEqual(token_a[0] , A__ )
    @require_tokenizers
    def A ( self : Dict ) -> int:
        """A tokenizer carrying an AddedToken special token survives save/reload."""
        a = self.get_tokenizers(do_lower_case=A__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                # a special token for Canine can be defined as follows:
                a = 0Xe_006
                a = chr(A__ )
                a = AddedToken(A__ , lstrip=A__ )
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(A__ )
                    tokenizer.from_pretrained(A__ )
    def A ( self : List[Any] ) -> List[Any]:
        """additional_special_tokens can be edited in the saved JSON files and overridden in from_pretrained."""
        a = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(A__ )
                with open(os.path.join(A__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    a = json.load(A__ )
                with open(os.path.join(A__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    a = json.load(A__ )
                # a special token for Canine can be defined as follows:
                a = 0Xe_006
                a = chr(A__ )
                a = [new_token_a]
                a = [new_token_a]
                with open(os.path.join(A__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(A__ , A__ )
                with open(os.path.join(A__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(A__ , A__ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                a = tokenizer_class.from_pretrained(A__ , extra_ids=0 )
                self.assertIn(A__ , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                a = 0Xe_007
                a = chr(A__ )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                a = [AddedToken(A__ , lstrip=A__ )]
                a = tokenizer_class.from_pretrained(
                    A__ , additional_special_tokens=A__ , extra_ids=0 )
                self.assertIn(A__ , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
    @require_tokenizers
    def A ( self : List[str] ) -> List[str]:
        """Decoding an encoded string reproduces it, with or without spaces around special tokens."""
        a = self.get_tokenizers(do_lower_case=A__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                a = "hello world"
                if self.space_between_special_tokens:
                    a = "[CLS] hello world [SEP]"
                else:
                    a = input
                a = tokenizer.encode(A__ , add_special_tokens=A__ )
                a = tokenizer.decode(A__ , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(A__ , [output, output.lower()] )
    def A ( self : Optional[int] ) -> Union[str, Any]:
        """Special-token attributes can be set via their *_id counterparts and read back consistently."""
        a = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                a = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                a = "a"
                a = ord(A__ )
                for attr in attributes_list:
                    setattr(A__ , attr + "_id" , A__ )
                    self.assertEqual(getattr(A__ , A__ ) , A__ )
                    self.assertEqual(getattr(A__ , attr + "_id" ) , A__ )
                    setattr(A__ , attr + "_id" , A__ )
                    self.assertEqual(getattr(A__ , A__ ) , A__ )
                    self.assertEqual(getattr(A__ , attr + "_id" ) , A__ )
                setattr(A__ , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(A__ , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(A__ , "additional_special_tokens_ids" ) , [] )
                a = 0Xe_006
                a = chr(A__ )
                setattr(A__ , "additional_special_tokens_ids" , [additional_special_token_id] )
                self.assertListEqual(getattr(A__ , "additional_special_tokens" ) , [additional_special_token] )
                self.assertListEqual(getattr(A__ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
    def A ( self : Tuple ) -> Union[str, Any]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : Union[str, Any] ) -> List[str]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : List[str] ) -> Optional[Any]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : Optional[Any] ) -> List[Any]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : Union[str, Any] ) -> Optional[int]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : List[Any] ) -> List[Any]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : Tuple ) -> int:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
    def A ( self : Tuple ) -> Optional[int]:
        """No-op override of a mixin test (presumably not applicable to CANINE)."""
        pass
| 700
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' )
class _lowercase ( unittest.TestCase ):
    """Slow integration tests for the Tatoeba -> Marian model converter.

    NOTE(review): ``UpperCAmelCase__`` in the skip condition and
    ``__lowerCAmelCase`` below are undefined names — they presumably stood for
    ``DEFAULT_REPO``, the temp dir, and the dry-run flag before an automated
    rewrite; confirm against the upstream test.
    """
    @cached_property
    def A ( self : List[str] ) -> int:
        """Build a converter writing into a throwaway temporary directory."""
        a = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__lowerCAmelCase )
    @slow
    def A ( self : Optional[int] ) -> List[str]:
        """Converting the Hebrew->English model should run end to end."""
        self.resolver.convert_models(["heb-eng"] )
    @slow
    def A ( self : Dict ) -> Any:
        """The generated model-card metadata should record the language pair."""
        a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase )
        assert mmeta["long_pair"] == "heb-eng"
| 32
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.