ZTWHHH committed
Commit 9cbb62b · verified · 1 Parent(s): b739e3b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. evalkit_internvl/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  2. evalkit_internvl/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc +0 -0
  3. evalkit_internvl/lib/python3.10/site-packages/transformers/data/__init__.py +44 -0
  4. evalkit_internvl/lib/python3.10/site-packages/transformers/data/data_collator.py +1568 -0
  5. evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__init__.py +98 -0
  6. evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  7. evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc +0 -0
  8. evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py +780 -0
  9. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__init__.py +18 -0
  10. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  11. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc +0 -0
  12. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc +0 -0
  13. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc +0 -0
  14. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc +0 -0
  15. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/glue.py +643 -0
  16. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/squad.py +845 -0
  17. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/utils.py +349 -0
  18. evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/xnli.py +97 -0
  19. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/__init__.cpython-310.pyc +0 -0
  20. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/auto_pipeline.cpython-310.pyc +0 -0
  21. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/free_init_utils.cpython-310.pyc +0 -0
  22. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/onnx_utils.cpython-310.pyc +0 -0
  23. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/pipeline_flax_utils.cpython-310.pyc +0 -0
  24. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__init__.py +49 -0
  25. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/__init__.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_animatediff.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_animatediff_video2video.cpython-310.pyc +0 -0
  28. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_output.cpython-310.pyc +0 -0
  29. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py +997 -0
  30. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/pipeline_output.py +23 -0
  31. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__init__.py +80 -0
  32. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py +532 -0
  33. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/__init__.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting_superresolution.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/safety_checker.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/pipeline_dit.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/dit/pipeline_dit.py +233 -0
  38. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/__init__.cpython-310.pyc +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/pipeline_kandinsky_combined.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/text_encoder.cpython-310.pyc +0 -0
  41. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py +98 -0
  42. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__init__.py +50 -0
  43. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__pycache__/__init__.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__pycache__/pipeline_latent_diffusion_superresolution.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +746 -0
  46. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +189 -0
  47. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ledits_pp/__pycache__/__init__.cpython-310.pyc +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ledits_pp/__pycache__/pipeline_leditspp_stable_diffusion.cpython-310.pyc +0 -0
  49. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +48 -0
  50. evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_attend_and_excite/__pycache__/__init__.cpython-310.pyc +0 -0
evalkit_internvl/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (822 Bytes).
 
evalkit_internvl/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc ADDED
Binary file (49 kB).
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/__init__.py ADDED
@@ -0,0 +1,44 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .data_collator import (
+     DataCollatorForLanguageModeling,
+     DataCollatorForPermutationLanguageModeling,
+     DataCollatorForSeq2Seq,
+     DataCollatorForSOP,
+     DataCollatorForTokenClassification,
+     DataCollatorForWholeWordMask,
+     DataCollatorWithPadding,
+     DefaultDataCollator,
+     default_data_collator,
+ )
+ from .metrics import glue_compute_metrics, xnli_compute_metrics
+ from .processors import (
+     DataProcessor,
+     InputExample,
+     InputFeatures,
+     SingleSentenceClassificationProcessor,
+     SquadExample,
+     SquadFeatures,
+     SquadV1Processor,
+     SquadV2Processor,
+     glue_convert_examples_to_features,
+     glue_output_modes,
+     glue_processors,
+     glue_tasks_num_labels,
+     squad_convert_examples_to_features,
+     xnli_output_modes,
+     xnli_processors,
+     xnli_tasks_num_labels,
+ )
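For orientation, a minimal sketch (not part of the committed file) of the import surface this __init__.py adds; the same names are also re-exported from the top-level transformers package.

# Both import paths resolve to the same objects once this module is in place.
from transformers.data import DataCollatorWithPadding, default_data_collator
from transformers import DataCollatorWithPadding as TopLevelCollator

assert DataCollatorWithPadding is TopLevelCollator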
evalkit_internvl/lib/python3.10/site-packages/transformers/data/data_collator.py ADDED
@@ -0,0 +1,1568 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import random
16
+ import warnings
17
+ from collections.abc import Mapping
18
+ from dataclasses import dataclass
19
+ from random import randint
20
+ from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ..models.bert import BertTokenizer, BertTokenizerFast
25
+ from ..tokenization_utils_base import PreTrainedTokenizerBase
26
+ from ..utils import PaddingStrategy
27
+
28
+
29
+ InputDataClass = NewType("InputDataClass", Any)
30
+
31
+ """
32
+ A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
33
+ of PyTorch/TensorFlow tensors or NumPy arrays.
34
+ """
35
+ DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
36
+
37
+
38
+ class DataCollatorMixin:
39
+ def __call__(self, features, return_tensors=None):
40
+ if return_tensors is None:
41
+ return_tensors = self.return_tensors
42
+ if return_tensors == "tf":
43
+ return self.tf_call(features)
44
+ elif return_tensors == "pt":
45
+ return self.torch_call(features)
46
+ elif return_tensors == "np":
47
+ return self.numpy_call(features)
48
+ else:
49
+ raise ValueError(f"Framework '{return_tensors}' not recognized!")
50
+
51
+
52
+ def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
53
+ """
54
+ Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
55
+ """
56
+
57
+ # To avoid errors when using Feature extractors
58
+ if not hasattr(tokenizer, "deprecation_warnings"):
59
+ return tokenizer.pad(*pad_args, **pad_kwargs)
60
+
61
+ # Save the state of the warning, then disable it
62
+ warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
63
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
64
+
65
+ try:
66
+ padded = tokenizer.pad(*pad_args, **pad_kwargs)
67
+ finally:
68
+ # Restore the state of the warning.
69
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state
70
+
71
+ return padded
72
+
73
+
74
+ def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
75
+ """
76
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
77
+ potential keys named:
78
+
79
+ - `label`: handles a single value (int or float) per object
80
+ - `label_ids`: handles a list of values per object
81
+
82
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
83
+ to the model. See glue and ner for examples of how it's useful.
84
+ """
85
+
86
+ # In this function we'll make the assumption that all `features` in the batch
87
+ # have the same attributes.
88
+ # So we will look at the first element as a proxy for what attributes exist
89
+ # on the whole batch.
90
+
91
+ if return_tensors == "pt":
92
+ return torch_default_data_collator(features)
93
+ elif return_tensors == "tf":
94
+ return tf_default_data_collator(features)
95
+ elif return_tensors == "np":
96
+ return numpy_default_data_collator(features)
97
+
98
+
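A small usage sketch (not part of the file) of the label handling described above; the feature dicts are invented for illustration.

from transformers.data import default_data_collator

features = [
    {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1], "label": 0},
    {"input_ids": [101, 2054, 102], "attention_mask": [1, 1, 1], "label": 1},
]
batch = default_data_collator(features)  # return_tensors defaults to "pt"
# "label" is collected into a single "labels" tensor (dtype long because the values
# are ints); every other key is stacked as-is, so batch["input_ids"] has shape (2, 3).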
99
+ @dataclass
100
+ class DefaultDataCollator(DataCollatorMixin):
101
+ """
102
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
103
+ potential keys named:
104
+
105
+ - `label`: handles a single value (int or float) per object
106
+ - `label_ids`: handles a list of values per object
107
+
108
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
109
+ to the model. See glue and ner for examples of how it's useful.
110
+
111
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
112
+ helpful if you need to set a return_tensors value at initialization.
113
+
114
+ Args:
115
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
116
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
117
+ """
118
+
119
+ return_tensors: str = "pt"
120
+
121
+ def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
122
+ if return_tensors is None:
123
+ return_tensors = self.return_tensors
124
+ return default_data_collator(features, return_tensors)
125
+
126
+
127
+ def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
128
+ import torch
129
+
130
+ if not isinstance(features[0], Mapping):
131
+ features = [vars(f) for f in features]
132
+ first = features[0]
133
+ batch = {}
134
+
135
+ # Special handling for labels.
136
+ # Ensure that tensor is created with the correct type
137
+ # (it should be automatically the case, but let's make sure of it.)
138
+ if "label" in first and first["label"] is not None:
139
+ label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
140
+ dtype = torch.long if isinstance(label, int) else torch.float
141
+ batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
142
+ elif "label_ids" in first and first["label_ids"] is not None:
143
+ if isinstance(first["label_ids"], torch.Tensor):
144
+ batch["labels"] = torch.stack([f["label_ids"] for f in features])
145
+ else:
146
+ dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
147
+ batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
148
+
149
+ # Handling of all other possible keys.
150
+ # Again, we will use the first element to figure out which key/values are not None for this model.
151
+ for k, v in first.items():
152
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
153
+ if isinstance(v, torch.Tensor):
154
+ batch[k] = torch.stack([f[k] for f in features])
155
+ elif isinstance(v, np.ndarray):
156
+ batch[k] = torch.tensor(np.stack([f[k] for f in features]))
157
+ else:
158
+ batch[k] = torch.tensor([f[k] for f in features])
159
+
160
+ return batch
161
+
162
+
163
+ def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
164
+ import tensorflow as tf
165
+
166
+ if not isinstance(features[0], Mapping):
167
+ features = [vars(f) for f in features]
168
+ first = features[0]
169
+ batch = {}
170
+
171
+ # Special handling for labels.
172
+ # Ensure that tensor is created with the correct type
173
+ # (it should be automatically the case, but let's make sure of it.)
174
+ if "label" in first and first["label"] is not None:
175
+ label_col_name = "label"
176
+ elif "label_ids" in first and first["label_ids"] is not None:
177
+ label_col_name = "label_ids"
178
+ elif "labels" in first and first["labels"] is not None:
179
+ label_col_name = "labels"
180
+ else:
181
+ label_col_name = None
182
+ if label_col_name is not None:
183
+ if isinstance(first[label_col_name], tf.Tensor):
184
+ dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
185
+ elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
186
+ dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
187
+ elif isinstance(first[label_col_name], (tuple, list)):
188
+ dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
189
+ else:
190
+ dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
191
+ batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
192
+ # Handling of all other possible keys.
193
+ # Again, we will use the first element to figure out which key/values are not None for this model.
194
+ for k, v in first.items():
195
+ if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
196
+ if isinstance(v, (tf.Tensor, np.ndarray)):
197
+ batch[k] = tf.stack([f[k] for f in features])
198
+ else:
199
+ batch[k] = tf.convert_to_tensor([f[k] for f in features])
200
+
201
+ return batch
202
+
203
+
204
+ def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
205
+ if not isinstance(features[0], Mapping):
206
+ features = [vars(f) for f in features]
207
+ first = features[0]
208
+ batch = {}
209
+
210
+ # Special handling for labels.
211
+ # Ensure that tensor is created with the correct type
212
+ # (it should be automatically the case, but let's make sure of it.)
213
+ if "label" in first and first["label"] is not None:
214
+ label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
215
+ dtype = np.int64 if isinstance(label, int) else np.float32
216
+ batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
217
+ elif "label_ids" in first and first["label_ids"] is not None:
218
+ if isinstance(first["label_ids"], np.ndarray):
219
+ batch["labels"] = np.stack([f["label_ids"] for f in features])
220
+ else:
221
+ dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
222
+ batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
223
+
224
+ # Handling of all other possible keys.
225
+ # Again, we will use the first element to figure out which key/values are not None for this model.
226
+ for k, v in first.items():
227
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
228
+ if isinstance(v, np.ndarray):
229
+ batch[k] = np.stack([f[k] for f in features])
230
+ else:
231
+ batch[k] = np.array([f[k] for f in features])
232
+
233
+ return batch
234
+
235
+
236
+ @dataclass
237
+ class DataCollatorWithPadding:
238
+ """
239
+ Data collator that will dynamically pad the inputs received.
240
+
241
+ Args:
242
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
243
+ The tokenizer used for encoding the data.
244
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
245
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
246
+ among:
247
+
248
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
249
+ sequence is provided).
250
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
251
+ acceptable input length for the model if that argument is not provided.
252
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
253
+ max_length (`int`, *optional*):
254
+ Maximum length of the returned list and optionally padding length (see above).
255
+ pad_to_multiple_of (`int`, *optional*):
256
+ If set will pad the sequence to a multiple of the provided value.
257
+
258
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
259
+ 7.5 (Volta).
260
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
261
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
262
+ """
263
+
264
+ tokenizer: PreTrainedTokenizerBase
265
+ padding: Union[bool, str, PaddingStrategy] = True
266
+ max_length: Optional[int] = None
267
+ pad_to_multiple_of: Optional[int] = None
268
+ return_tensors: str = "pt"
269
+
270
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
271
+ batch = pad_without_fast_tokenizer_warning(
272
+ self.tokenizer,
273
+ features,
274
+ padding=self.padding,
275
+ max_length=self.max_length,
276
+ pad_to_multiple_of=self.pad_to_multiple_of,
277
+ return_tensors=self.return_tensors,
278
+ )
279
+ if "label" in batch:
280
+ batch["labels"] = batch["label"]
281
+ del batch["label"]
282
+ if "label_ids" in batch:
283
+ batch["labels"] = batch["label_ids"]
284
+ del batch["label_ids"]
285
+ return batch
286
+
287
+
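A usage sketch for the collator above, assuming a fast tokenizer checkpoint such as "bert-base-uncased" (a placeholder) is available.

from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer, pad_to_multiple_of=8)

# Encodings of different lengths; the collator pads each batch to a common length.
samples = [tokenizer("a short example"), tokenizer("a noticeably longer example sentence")]
loader = DataLoader(samples, batch_size=2, collate_fn=collator)
batch = next(iter(loader))
# batch["input_ids"].shape[1] is padded up to a multiple of 8; a "label" key, if
# present, would be renamed to "labels".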
288
+ @dataclass
289
+ class DataCollatorForTokenClassification(DataCollatorMixin):
290
+ """
291
+ Data collator that will dynamically pad the inputs received, as well as the labels.
292
+
293
+ Args:
294
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
295
+ The tokenizer used for encoding the data.
296
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
297
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
298
+ among:
299
+
300
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
301
+ sequence is provided).
302
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
303
+ acceptable input length for the model if that argument is not provided.
304
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
305
+ max_length (`int`, *optional*):
306
+ Maximum length of the returned list and optionally padding length (see above).
307
+ pad_to_multiple_of (`int`, *optional*):
308
+ If set will pad the sequence to a multiple of the provided value.
309
+
310
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
311
+ 7.5 (Volta).
312
+ label_pad_token_id (`int`, *optional*, defaults to -100):
313
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
314
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
315
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
316
+ """
317
+
318
+ tokenizer: PreTrainedTokenizerBase
319
+ padding: Union[bool, str, PaddingStrategy] = True
320
+ max_length: Optional[int] = None
321
+ pad_to_multiple_of: Optional[int] = None
322
+ label_pad_token_id: int = -100
323
+ return_tensors: str = "pt"
324
+
325
+ def torch_call(self, features):
326
+ import torch
327
+
328
+ label_name = "label" if "label" in features[0].keys() else "labels"
329
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
330
+
331
+ no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
332
+
333
+ batch = pad_without_fast_tokenizer_warning(
334
+ self.tokenizer,
335
+ no_labels_features,
336
+ padding=self.padding,
337
+ max_length=self.max_length,
338
+ pad_to_multiple_of=self.pad_to_multiple_of,
339
+ return_tensors="pt",
340
+ )
341
+
342
+ if labels is None:
343
+ return batch
344
+
345
+ sequence_length = batch["input_ids"].shape[1]
346
+ padding_side = self.tokenizer.padding_side
347
+
348
+ def to_list(tensor_or_iterable):
349
+ if isinstance(tensor_or_iterable, torch.Tensor):
350
+ return tensor_or_iterable.tolist()
351
+ return list(tensor_or_iterable)
352
+
353
+ if padding_side == "right":
354
+ batch[label_name] = [
355
+ to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
356
+ ]
357
+ else:
358
+ batch[label_name] = [
359
+ [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
360
+ ]
361
+
362
+ batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
363
+ return batch
364
+
365
+ def tf_call(self, features):
366
+ import tensorflow as tf
367
+
368
+ label_name = "label" if "label" in features[0].keys() else "labels"
369
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
370
+ batch = pad_without_fast_tokenizer_warning(
371
+ self.tokenizer,
372
+ features,
373
+ padding=self.padding,
374
+ max_length=self.max_length,
375
+ pad_to_multiple_of=self.pad_to_multiple_of,
376
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
377
+ return_tensors="tf" if labels is None else None,
378
+ )
379
+
380
+ if labels is None:
381
+ return batch
382
+
383
+ sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
384
+ padding_side = self.tokenizer.padding_side
385
+ if padding_side == "right":
386
+ batch["labels"] = [
387
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
388
+ ]
389
+ else:
390
+ batch["labels"] = [
391
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
392
+ ]
393
+
394
+ batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
395
+ return batch
396
+
397
+ def numpy_call(self, features):
398
+ label_name = "label" if "label" in features[0].keys() else "labels"
399
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
400
+ batch = pad_without_fast_tokenizer_warning(
401
+ self.tokenizer,
402
+ features,
403
+ padding=self.padding,
404
+ max_length=self.max_length,
405
+ pad_to_multiple_of=self.pad_to_multiple_of,
406
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
407
+ return_tensors="np" if labels is None else None,
408
+ )
409
+
410
+ if labels is None:
411
+ return batch
412
+
413
+ sequence_length = np.array(batch["input_ids"]).shape[1]
414
+ padding_side = self.tokenizer.padding_side
415
+ if padding_side == "right":
416
+ batch["labels"] = [
417
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
418
+ ]
419
+ else:
420
+ batch["labels"] = [
421
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
422
+ ]
423
+
424
+ batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
425
+ return batch
426
+
427
+
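A sketch of how the token-classification collator pads labels to the padded sequence length; the checkpoint name and tag ids are placeholders.

from transformers import AutoTokenizer, DataCollatorForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForTokenClassification(tokenizer=tokenizer, label_pad_token_id=-100)

features = [
    {"input_ids": [101, 7592, 102], "labels": [-100, 3, -100]},
    {"input_ids": [101, 7592, 2088, 999, 102], "labels": [-100, 3, 5, 0, -100]},
]
batch = collator(features)
# input_ids are padded by the tokenizer; the shorter label list is extended with
# -100 to the same length, so padding positions are ignored by the loss.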
428
+ def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
429
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
430
+ import torch
431
+
432
+ # Tensorize if necessary.
433
+ if isinstance(examples[0], (list, tuple, np.ndarray)):
434
+ examples = [torch.tensor(e, dtype=torch.long) for e in examples]
435
+
436
+ length_of_first = examples[0].size(0)
437
+
438
+ # Check if padding is necessary.
439
+
440
+ are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
441
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
442
+ return torch.stack(examples, dim=0)
443
+
444
+ # If yes, check if we have a `pad_token`.
445
+ if tokenizer._pad_token is None:
446
+ raise ValueError(
447
+ "You are attempting to pad samples but the tokenizer you are using"
448
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
449
+ )
450
+
451
+ # Creating the full tensor and filling it with our data.
452
+ max_length = max(x.size(0) for x in examples)
453
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
454
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
455
+ result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
456
+ for i, example in enumerate(examples):
457
+ if tokenizer.padding_side == "right":
458
+ result[i, : example.shape[0]] = example
459
+ else:
460
+ result[i, -example.shape[0] :] = example
461
+ return result
462
+
463
+
464
+ def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
465
+ import tensorflow as tf
466
+
467
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
468
+ # Tensorize if necessary.
469
+ if isinstance(examples[0], (list, tuple)):
470
+ examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
471
+
472
+ # Check if padding is necessary.
473
+ length_of_first = len(examples[0])
474
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
475
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
476
+ return tf.stack(examples, axis=0)
477
+
478
+ # If yes, check if we have a `pad_token`.
479
+ if tokenizer._pad_token is None:
480
+ raise ValueError(
481
+ "You are attempting to pad samples but the tokenizer you are using"
482
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
483
+ )
484
+
485
+ # Creating the full tensor and filling it with our data.
486
+ max_length = max(len(x) for x in examples)
487
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
488
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
489
+ # result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
490
+ result = []
491
+ rank = tf.rank(examples[0])
492
+ paddings = np.zeros((rank, 2), dtype=np.int32)
493
+ for example in examples:
494
+ if tokenizer.padding_side == "right":
495
+ paddings[0, 1] = max_length - len(example)
496
+ else:
497
+ paddings[0, 0] = max_length - len(example)
498
+ result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
499
+ return tf.stack(result, axis=0)
500
+
501
+
502
+ def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
503
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
504
+ # Tensorize if necessary.
505
+ if isinstance(examples[0], (list, tuple)):
506
+ examples = [np.array(e, dtype=np.int64) for e in examples]
507
+
508
+ # Check if padding is necessary.
509
+ length_of_first = len(examples[0])
510
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
511
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
512
+ return np.stack(examples, axis=0)
513
+
514
+ # If yes, check if we have a `pad_token`.
515
+ if tokenizer._pad_token is None:
516
+ raise ValueError(
517
+ "You are attempting to pad samples but the tokenizer you are using"
518
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
519
+ )
520
+
521
+ # Creating the full tensor and filling it with our data.
522
+ max_length = max(len(x) for x in examples)
523
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
524
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
525
+ result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
526
+ for i, example in enumerate(examples):
527
+ if tokenizer.padding_side == "right":
528
+ result[i, : example.shape[0]] = example
529
+ else:
530
+ result[i, -example.shape[0] :] = example
531
+ return result
532
+
533
+
534
+ def tolist(x):
535
+ if isinstance(x, list):
536
+ return x
537
+ elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
538
+ x = x.numpy()
539
+ return x.tolist()
540
+
541
+
542
+ @dataclass
543
+ class DataCollatorForSeq2Seq:
544
+ """
545
+ Data collator that will dynamically pad the inputs received, as well as the labels.
546
+
547
+ Args:
548
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
549
+ The tokenizer used for encoding the data.
550
+ model ([`PreTrainedModel`], *optional*):
551
+ The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
552
+ prepare the *decoder_input_ids*
553
+
554
+ This is useful when using *label_smoothing* to avoid calculating loss twice.
555
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
556
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
557
+ among:
558
+
559
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
560
+ sequence is provided).
561
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
562
+ acceptable input length for the model if that argument is not provided.
563
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
564
+ max_length (`int`, *optional*):
565
+ Maximum length of the returned list and optionally padding length (see above).
566
+ pad_to_multiple_of (`int`, *optional*):
567
+ If set will pad the sequence to a multiple of the provided value.
568
+
569
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
570
+ 7.5 (Volta).
571
+ label_pad_token_id (`int`, *optional*, defaults to -100):
572
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
573
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
574
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
575
+ """
576
+
577
+ tokenizer: PreTrainedTokenizerBase
578
+ model: Optional[Any] = None
579
+ padding: Union[bool, str, PaddingStrategy] = True
580
+ max_length: Optional[int] = None
581
+ pad_to_multiple_of: Optional[int] = None
582
+ label_pad_token_id: int = -100
583
+ return_tensors: str = "pt"
584
+
585
+ def __call__(self, features, return_tensors=None):
586
+ if return_tensors is None:
587
+ return_tensors = self.return_tensors
588
+ labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
589
+ # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
590
+ # same length to return tensors.
591
+ if labels is not None:
592
+ max_label_length = max(len(l) for l in labels)
593
+ if self.pad_to_multiple_of is not None:
594
+ max_label_length = (
595
+ (max_label_length + self.pad_to_multiple_of - 1)
596
+ // self.pad_to_multiple_of
597
+ * self.pad_to_multiple_of
598
+ )
599
+
600
+ padding_side = self.tokenizer.padding_side
601
+ for feature in features:
602
+ remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
603
+ if isinstance(feature["labels"], list):
604
+ feature["labels"] = (
605
+ feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
606
+ )
607
+ elif padding_side == "right":
608
+ feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
609
+ else:
610
+ feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
611
+
612
+ features = pad_without_fast_tokenizer_warning(
613
+ self.tokenizer,
614
+ features,
615
+ padding=self.padding,
616
+ max_length=self.max_length,
617
+ pad_to_multiple_of=self.pad_to_multiple_of,
618
+ return_tensors=return_tensors,
619
+ )
620
+
621
+ # prepare decoder_input_ids
622
+ if (
623
+ labels is not None
624
+ and self.model is not None
625
+ and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
626
+ ):
627
+ decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
628
+ features["decoder_input_ids"] = decoder_input_ids
629
+
630
+ return features
631
+
632
+
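A sketch of the seq2seq collator; "t5-small" is a placeholder checkpoint, and passing the model triggers the decoder_input_ids preparation described above.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)

features = [
    {"input_ids": tokenizer("translate English to German: hello").input_ids,
     "labels": tokenizer("hallo").input_ids},
    {"input_ids": tokenizer("translate English to German: thank you").input_ids,
     "labels": tokenizer("danke").input_ids},
]
batch = collator(features)
# Labels are padded with -100 before tokenizer.pad runs, and because T5 exposes
# prepare_decoder_input_ids_from_labels, the batch also carries decoder_input_ids.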
633
+ @dataclass
634
+ class DataCollatorForLanguageModeling(DataCollatorMixin):
635
+ """
636
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
637
+ are not all of the same length.
638
+
639
+ Args:
640
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
641
+ The tokenizer used for encoding the data.
642
+ mlm (`bool`, *optional*, defaults to `True`):
643
+ Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
644
+ with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
645
+ tokens and the value to predict for the masked token.
646
+ mlm_probability (`float`, *optional*, defaults to 0.15):
647
+ The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
648
+ pad_to_multiple_of (`int`, *optional*):
649
+ If set will pad the sequence to a multiple of the provided value.
650
+ return_tensors (`str`):
651
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
652
+
653
+ <Tip>
654
+
655
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
656
+ BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
657
+ [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
658
+
659
+ </Tip>"""
660
+
661
+ tokenizer: PreTrainedTokenizerBase
662
+ mlm: bool = True
663
+ mlm_probability: float = 0.15
664
+ pad_to_multiple_of: Optional[int] = None
665
+ tf_experimental_compile: bool = False
666
+ return_tensors: str = "pt"
667
+
668
+ def __post_init__(self):
669
+ if self.mlm and self.tokenizer.mask_token is None:
670
+ raise ValueError(
671
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
672
+ "You should pass `mlm=False` to train on causal language modeling instead."
673
+ )
674
+ if self.tf_experimental_compile:
675
+ import tensorflow as tf
676
+
677
+ self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
678
+
679
+ @staticmethod
680
+ def tf_bernoulli(shape, probability):
681
+ import tensorflow as tf
682
+
683
+ prob_matrix = tf.fill(shape, probability)
684
+ return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
685
+
686
+ def tf_mask_tokens(
687
+ self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
688
+ ) -> Tuple[Any, Any]:
689
+ """
690
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
691
+ """
692
+ import tensorflow as tf
693
+
694
+ mask_token_id = tf.cast(mask_token_id, inputs.dtype)
695
+
696
+ input_shape = tf.shape(inputs)
697
+ # 1 for a special token, 0 for a normal token in the special tokens mask
698
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
699
+ masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
700
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
701
+ labels = tf.where(masked_indices, inputs, -100)
702
+
703
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
704
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
705
+
706
+ inputs = tf.where(indices_replaced, mask_token_id, inputs)
707
+
708
+ # 10% of the time, we replace masked input tokens with random word
709
+ indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
710
+ random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
711
+
712
+ inputs = tf.where(indices_random, random_words, inputs)
713
+
714
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
715
+ return inputs, labels
716
+
717
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
718
+ import tensorflow as tf
719
+
720
+ # Handle dict or lists with proper padding and conversion to tensor.
721
+ if isinstance(examples[0], Mapping):
722
+ batch = pad_without_fast_tokenizer_warning(
723
+ self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
724
+ )
725
+ else:
726
+ batch = {
727
+ "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
728
+ }
729
+
730
+ # If special token mask has been preprocessed, pop it from the dict.
731
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
732
+ if self.mlm:
733
+ if special_tokens_mask is None:
734
+ special_tokens_mask = [
735
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
736
+ for val in batch["input_ids"].numpy().tolist()
737
+ ]
738
+ # Cannot directly create as bool
739
+ special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
740
+ else:
741
+ special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
742
+ batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
743
+ tf.cast(batch["input_ids"], tf.int64),
744
+ special_tokens_mask=special_tokens_mask,
745
+ mask_token_id=self.tokenizer.mask_token_id,
746
+ vocab_size=len(self.tokenizer),
747
+ )
748
+ else:
749
+ labels = batch["input_ids"]
750
+ if self.tokenizer.pad_token_id is not None:
751
+ # Replace self.tokenizer.pad_token_id with -100
752
+ labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
753
+ else:
754
+ labels = tf.identity(labels) # Makes a copy, just in case
755
+ batch["labels"] = labels
756
+ return batch
757
+
758
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
759
+ # Handle dict or lists with proper padding and conversion to tensor.
760
+ if isinstance(examples[0], Mapping):
761
+ batch = pad_without_fast_tokenizer_warning(
762
+ self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
763
+ )
764
+ else:
765
+ batch = {
766
+ "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
767
+ }
768
+
769
+ # If special token mask has been preprocessed, pop it from the dict.
770
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
771
+ if self.mlm:
772
+ batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
773
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
774
+ )
775
+ else:
776
+ labels = batch["input_ids"].clone()
777
+ if self.tokenizer.pad_token_id is not None:
778
+ labels[labels == self.tokenizer.pad_token_id] = -100
779
+ batch["labels"] = labels
780
+ return batch
781
+
782
+ def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
783
+ """
784
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
785
+ """
786
+ import torch
787
+
788
+ labels = inputs.clone()
789
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
790
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
791
+ if special_tokens_mask is None:
792
+ special_tokens_mask = [
793
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
794
+ ]
795
+ special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
796
+ else:
797
+ special_tokens_mask = special_tokens_mask.bool()
798
+
799
+ probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
800
+ masked_indices = torch.bernoulli(probability_matrix).bool()
801
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
802
+
803
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
804
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
805
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
806
+
807
+ # 10% of the time, we replace masked input tokens with random word
808
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
809
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
810
+ inputs[indices_random] = random_words[indices_random]
811
+
812
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
813
+ return inputs, labels
814
+
815
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
816
+ # Handle dict or lists with proper padding and conversion to tensor.
817
+ if isinstance(examples[0], Mapping):
818
+ batch = pad_without_fast_tokenizer_warning(
819
+ self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
820
+ )
821
+ else:
822
+ batch = {
823
+ "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
824
+ }
825
+
826
+ # If special token mask has been preprocessed, pop it from the dict.
827
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
828
+ if self.mlm:
829
+ batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
830
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
831
+ )
832
+ else:
833
+ labels = np.copy(batch["input_ids"])
834
+ if self.tokenizer.pad_token_id is not None:
835
+ labels[labels == self.tokenizer.pad_token_id] = -100
836
+ batch["labels"] = labels
837
+ return batch
838
+
839
+ def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
840
+ """
841
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
842
+ """
843
+ labels = np.copy(inputs)
844
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
845
+ probability_matrix = np.full(labels.shape, self.mlm_probability)
846
+ if special_tokens_mask is None:
847
+ special_tokens_mask = [
848
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
849
+ ]
850
+ special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
851
+ else:
852
+ special_tokens_mask = special_tokens_mask.astype(bool)
853
+
854
+ probability_matrix[special_tokens_mask] = 0
855
+ # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
856
+ masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
857
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
858
+
859
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
860
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
861
+ inputs[indices_replaced] = self.tokenizer.mask_token_id
862
+
863
+ # 10% of the time, we replace masked input tokens with random word
864
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
865
+ indices_random = (
866
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
867
+ )
868
+ random_words = np.random.randint(
869
+ low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
870
+ )
871
+ inputs[indices_random] = random_words
872
+
873
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
874
+ return inputs, labels
875
+
876
+
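A sketch of the masked-language-modeling collator; the checkpoint name is a placeholder. In the torch/numpy paths above, the second draw uses probability 0.5 because it applies only to the ~20% of selected tokens not already replaced by [MASK], yielding the 80% / 10% / 10% split.

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

examples = [
    tokenizer("the quick brown fox jumps over the lazy dog", return_special_tokens_mask=True),
    tokenizer("data collators build batches on the fly", return_special_tokens_mask=True),
]
batch = collator(examples)
# labels are -100 except at the sampled positions, where they keep the original id;
# at those positions input_ids hold [MASK], a random token, or the original token.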
877
+ @dataclass
878
+ class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
879
+ """
880
+ Data collator used for language modeling that masks entire words.
881
+
882
+ - collates batches of tensors, honoring their tokenizer's pad_token
883
+ - preprocesses batches for masked language modeling
884
+
885
+ <Tip>
886
+
887
+ This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
888
+ that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
889
+ produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
890
+
891
+ </Tip>"""
892
+
893
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
894
+ if isinstance(examples[0], Mapping):
895
+ input_ids = [e["input_ids"] for e in examples]
896
+ else:
897
+ input_ids = examples
898
+ examples = [{"input_ids": e} for e in examples]
899
+
900
+ batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
901
+
902
+ mask_labels = []
903
+ for e in examples:
904
+ ref_tokens = []
905
+ for id in tolist(e["input_ids"]):
906
+ token = self.tokenizer._convert_id_to_token(id)
907
+ ref_tokens.append(token)
908
+
909
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
910
+ if "chinese_ref" in e:
911
+ ref_pos = tolist(e["chinese_ref"])
912
+ len_seq = len(e["input_ids"])
913
+ for i in range(len_seq):
914
+ if i in ref_pos:
915
+ ref_tokens[i] = "##" + ref_tokens[i]
916
+ mask_labels.append(self._whole_word_mask(ref_tokens))
917
+ batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
918
+ inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
919
+ return {"input_ids": inputs, "labels": labels}
920
+
921
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
922
+ import tensorflow as tf
923
+
924
+ if isinstance(examples[0], Mapping):
925
+ input_ids = [e["input_ids"] for e in examples]
926
+ else:
927
+ input_ids = examples
928
+ examples = [{"input_ids": e} for e in examples]
929
+
930
+ batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
931
+
932
+ mask_labels = []
933
+ for e in examples:
934
+ ref_tokens = []
935
+ for id in tolist(e["input_ids"]):
936
+ token = self.tokenizer._convert_id_to_token(id)
937
+ ref_tokens.append(token)
938
+
939
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
940
+ if "chinese_ref" in e:
941
+ ref_pos = tolist(e["chinese_ref"])
942
+ len_seq = len(e["input_ids"])
943
+ for i in range(len_seq):
944
+ if i in ref_pos:
945
+ ref_tokens[i] = "##" + ref_tokens[i]
946
+ mask_labels.append(self._whole_word_mask(ref_tokens))
947
+ batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
948
+ inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
949
+ return {"input_ids": inputs, "labels": labels}
950
+
951
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
952
+ if isinstance(examples[0], Mapping):
953
+ input_ids = [e["input_ids"] for e in examples]
954
+ else:
955
+ input_ids = examples
956
+ examples = [{"input_ids": e} for e in examples]
957
+
958
+ batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
959
+
960
+ mask_labels = []
961
+ for e in examples:
962
+ ref_tokens = []
963
+ for id in tolist(e["input_ids"]):
964
+ token = self.tokenizer._convert_id_to_token(id)
965
+ ref_tokens.append(token)
966
+
967
+ # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
968
+ if "chinese_ref" in e:
969
+ ref_pos = tolist(e["chinese_ref"])
970
+ len_seq = len(e["input_ids"])
971
+ for i in range(len_seq):
972
+ if i in ref_pos:
973
+ ref_tokens[i] = "##" + ref_tokens[i]
974
+ mask_labels.append(self._whole_word_mask(ref_tokens))
975
+ batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
976
+ inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
977
+ return {"input_ids": inputs, "labels": labels}
978
+
979
+ def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
980
+ """
981
+ Get 0/1 labels for masked tokens with whole word mask proxy
982
+ """
983
+ if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
984
+ warnings.warn(
985
+ "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
986
+ "Please refer to the documentation for more information."
987
+ )
988
+
989
+ cand_indexes = []
990
+ for i, token in enumerate(input_tokens):
991
+ if token == "[CLS]" or token == "[SEP]":
992
+ continue
993
+
994
+ if len(cand_indexes) >= 1 and token.startswith("##"):
995
+ cand_indexes[-1].append(i)
996
+ else:
997
+ cand_indexes.append([i])
998
+
999
+ random.shuffle(cand_indexes)
1000
+ num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
1001
+ masked_lms = []
1002
+ covered_indexes = set()
1003
+ for index_set in cand_indexes:
1004
+ if len(masked_lms) >= num_to_predict:
1005
+ break
1006
+ # If adding a whole-word mask would exceed the maximum number of
1007
+ # predictions, then just skip this candidate.
1008
+ if len(masked_lms) + len(index_set) > num_to_predict:
1009
+ continue
1010
+ is_any_index_covered = False
1011
+ for index in index_set:
1012
+ if index in covered_indexes:
1013
+ is_any_index_covered = True
1014
+ break
1015
+ if is_any_index_covered:
1016
+ continue
1017
+ for index in index_set:
1018
+ covered_indexes.add(index)
1019
+ masked_lms.append(index)
1020
+
1021
+ if len(covered_indexes) != len(masked_lms):
1022
+ raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
1023
+ mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
1024
+ return mask_labels
1025
+
1026
+ def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1027
+ """
1028
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
1029
+ 'mask_labels' means we use whole word masking (wwm); we directly mask indices according to the reference.
1030
+ """
1031
+ import torch
1032
+
1033
+ if self.tokenizer.mask_token is None:
1034
+ raise ValueError(
1035
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1036
+ " --mlm flag if you want to use this tokenizer."
1037
+ )
1038
+ labels = inputs.clone()
1039
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
1040
+
1041
+ probability_matrix = mask_labels
1042
+
1043
+ special_tokens_mask = [
1044
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1045
+ ]
1046
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
1047
+ if self.tokenizer._pad_token is not None:
1048
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1049
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
1050
+
1051
+ masked_indices = probability_matrix.bool()
1052
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1053
+
1054
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1055
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
1056
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1057
+
1058
+ # 10% of the time, we replace masked input tokens with random word
1059
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1060
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
1061
+ inputs[indices_random] = random_words[indices_random]
1062
+
1063
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1064
+ return inputs, labels
1065
+
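A quick arithmetic check of the 80/10/10 split implemented above (illustrative note, not part of the file): the 0.5 Bernoulli draw for random replacement only acts on masked positions that were not already replaced with [MASK].

# For a position selected by mask_labels:
#   P(replaced with [MASK]) = 0.8
#   P(random word id)       = (1 - 0.8) * 0.5 = 0.1
#   P(kept unchanged)       = (1 - 0.8) * 0.5 = 0.1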
1066
+ def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1067
+ """
1068
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. When
1069
+ `mask_labels` is provided, we use whole-word masking (wwm) and mask indices directly according to that reference.
1070
+ """
1071
+ import tensorflow as tf
1072
+
1073
+ input_shape = tf.shape(inputs)
1074
+ if self.tokenizer.mask_token is None:
1075
+ raise ValueError(
1076
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1077
+ " --mlm flag if you want to use this tokenizer."
1078
+ )
1079
+ labels = tf.identity(inputs)
1080
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
1081
+
1082
+ masked_indices = tf.cast(mask_labels, tf.bool)
1083
+
1084
+ special_tokens_mask = [
1085
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
1086
+ ]
1087
+ masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
1088
+ if self.tokenizer._pad_token is not None:
1089
+ padding_mask = inputs == self.tokenizer.pad_token_id
1090
+ masked_indices = masked_indices & ~padding_mask
1091
+
1092
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
1093
+ labels = tf.where(masked_indices, inputs, -100)
1094
+
1095
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1096
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
1097
+
1098
+ inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
1099
+
1100
+ # 10% of the time, we replace masked input tokens with random word
1101
+ indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
1102
+ random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
1103
+ inputs = tf.where(indices_random, random_words, inputs)
1104
+
1105
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1106
+ return inputs, labels
1107
+
1108
+ def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
1109
+ """
1110
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. When
1111
+ `mask_labels` is provided, we use whole-word masking (wwm) and mask indices directly according to that reference.
1112
+ """
1113
+ if self.tokenizer.mask_token is None:
1114
+ raise ValueError(
1115
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1116
+ " --mlm flag if you want to use this tokenizer."
1117
+ )
1118
+ labels = np.copy(inputs)
1119
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
1120
+
1121
+ masked_indices = mask_labels.astype(bool)
1122
+
1123
+ special_tokens_mask = [
1124
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1125
+ ]
1126
+ masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
1127
+ if self.tokenizer._pad_token is not None:
1128
+ padding_mask = labels == self.tokenizer.pad_token_id
1129
+ masked_indices[padding_mask] = 0
1130
+
1131
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1132
+
1133
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1134
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
1135
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1136
+
1137
+ # 10% of the time, we replace masked input tokens with random word
1138
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1139
+ indices_random = (
1140
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
1141
+ )
1142
+ random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
1143
+ inputs[indices_random] = random_words[indices_random]
1144
+
1145
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1146
+ return inputs, labels
1147
+
1148
+
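A minimal usage sketch of the whole-word-mask collator defined above, assuming a BERT fast tokenizer; the checkpoint name and example sentences are illustrative only, not part of this commit:

from transformers import BertTokenizerFast, DataCollatorForWholeWordMask

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="pt")

encodings = [tokenizer("an unhappy cat"), tokenizer("a happy dog")]
batch = collator([{"input_ids": enc["input_ids"]} for enc in encodings])
# batch["input_ids"]: whole-word-masked ids; batch["labels"]: -100 everywhere except masked positions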
1149
+ @dataclass
1150
+ class DataCollatorForSOP(DataCollatorForLanguageModeling):
1151
+ """
1152
+ Data collator used for sentence order prediction task.
1153
+
1154
+ - collates batches of tensors, honoring their tokenizer's pad_token
1155
+ - preprocesses batches for both masked language modeling and sentence order prediction
1156
+ """
1157
+
1158
+ def __init__(self, *args, **kwargs):
1159
+ warnings.warn(
1160
+ "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
1161
+ "DataCollatorForLanguageModeling instead.",
1162
+ FutureWarning,
1163
+ )
1164
+
1165
+ def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
1166
+ import torch
1167
+ from torch.nn.utils.rnn import pad_sequence
1168
+
1169
+ input_ids = [example["input_ids"] for example in examples]
1170
+ input_ids = _torch_collate_batch(input_ids, self.tokenizer)
1171
+ input_ids, labels, attention_mask = self.mask_tokens(input_ids)
1172
+
1173
+ token_type_ids = [example["token_type_ids"] for example in examples]
1174
+ # the size of segment_ids varies due to randomness; pad zeros at the end, as in the original implementation
1175
+ token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
1176
+
1177
+ sop_label_list = [example["sentence_order_label"] for example in examples]
1178
+ sentence_order_label = torch.stack(sop_label_list)
1179
+
1180
+ return {
1181
+ "input_ids": input_ids,
1182
+ "labels": labels,
1183
+ "attention_mask": attention_mask,
1184
+ "token_type_ids": token_type_ids,
1185
+ "sentence_order_label": sentence_order_label,
1186
+ }
1187
+
1188
+ def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
1189
+ """
1190
+ Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
1191
+ original. N-gram not applied yet.
1192
+ """
1193
+ import torch
1194
+
1195
+ if self.tokenizer.mask_token is None:
1196
+ raise ValueError(
1197
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
1198
+ " --mlm flag if you want to use this tokenizer."
1199
+ )
1200
+
1201
+ labels = inputs.clone()
1202
+ # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
1203
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
1204
+ special_tokens_mask = [
1205
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
1206
+ ]
1207
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
1208
+ if self.tokenizer._pad_token is not None:
1209
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1210
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
1211
+ masked_indices = torch.bernoulli(probability_matrix).bool()
1212
+ # the probability is `1` for masked tokens; however, in the ALBERT model an attention-mask value of `0` means masked, so invert the values
1213
+ attention_mask = (~masked_indices).float()
1214
+ if self.tokenizer._pad_token is not None:
1215
+ attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
1216
+ attention_mask.masked_fill_(attention_padding_mask, value=1.0)
1217
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens; -100 is the default ignore index for cross-entropy
1218
+
1219
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
1220
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
1221
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
1222
+
1223
+ # 10% of the time, we replace masked input tokens with random word
1224
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
1225
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
1226
+ inputs[indices_random] = random_words[indices_random]
1227
+
1228
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
1229
+ return inputs, labels, attention_mask
1230
+
1231
+
1232
+ @dataclass
1233
+ class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
1234
+ """
1235
+ Data collator used for permutation language modeling.
1236
+
1237
+ - collates batches of tensors, honoring their tokenizer's pad_token
1238
+ - preprocesses batches for permutation language modeling with procedures specific to XLNet
1239
+ """
1240
+
1241
+ tokenizer: PreTrainedTokenizerBase
1242
+ plm_probability: float = 1 / 6
1243
+ max_span_length: int = 5 # maximum length of a span of masked tokens
1244
+ return_tensors: str = "pt"
1245
+
1246
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1247
+ if isinstance(examples[0], Mapping):
1248
+ examples = [e["input_ids"] for e in examples]
1249
+ batch = _torch_collate_batch(examples, self.tokenizer)
1250
+ inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
1251
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1252
+
1253
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1254
+ if isinstance(examples[0], Mapping):
1255
+ examples = [e["input_ids"] for e in examples]
1256
+ batch = _tf_collate_batch(examples, self.tokenizer)
1257
+ inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
1258
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1259
+
1260
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
1261
+ if isinstance(examples[0], Mapping):
1262
+ examples = [e["input_ids"] for e in examples]
1263
+ batch = _numpy_collate_batch(examples, self.tokenizer)
1264
+ inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
1265
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
1266
+
1267
+ def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1268
+ """
1269
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1270
+
1271
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1272
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1273
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1274
+ masked
1275
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1276
+ span_length]` and mask tokens `start_index:start_index + span_length`
1277
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1278
+ sequence to be processed), repeat from Step 1.
1279
+ """
1280
+ import torch
1281
+
1282
+ if self.tokenizer.mask_token is None:
1283
+ raise ValueError(
1284
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1285
+ " Please add a mask token if you want to use this tokenizer."
1286
+ )
1287
+
1288
+ if inputs.size(1) % 2 != 0:
1289
+ raise ValueError(
1290
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1291
+ " relevant comments in source code for details."
1292
+ )
1293
+
1294
+ labels = inputs.clone()
1295
+ # Creating the mask and target_mapping tensors
1296
+ masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
1297
+ target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1298
+
1299
+ for i in range(labels.size(0)):
1300
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1301
+ cur_len = 0
1302
+ max_len = labels.size(1)
1303
+
1304
+ while cur_len < max_len:
1305
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1306
+ span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
1307
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1308
+ context_length = int(span_length / self.plm_probability)
1309
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1310
+ start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
1311
+ masked_indices[i, start_index : start_index + span_length] = 1
1312
+ # Set `cur_len = cur_len + context_length`
1313
+ cur_len += context_length
1314
+
1315
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1316
+ # the i-th prediction corresponds to the i-th token.
1317
+ target_mapping[i] = torch.eye(labels.size(1))
1318
+
1319
+ special_tokens_mask = torch.tensor(
1320
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
1321
+ dtype=torch.bool,
1322
+ )
1323
+ masked_indices.masked_fill_(special_tokens_mask, value=0.0)
1324
+ if self.tokenizer._pad_token is not None:
1325
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
1326
+ masked_indices.masked_fill_(padding_mask, value=0.0)
1327
+
1328
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1329
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1330
+
1331
+ inputs[masked_indices] = self.tokenizer.mask_token_id
1332
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1333
+
1334
+ perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
1335
+
1336
+ for i in range(labels.size(0)):
1337
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1338
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1339
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1340
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1341
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1342
+ # This requires that the sequence length be even.
1343
+
1344
+ # Create a linear factorisation order
1345
+ perm_index = torch.arange(labels.size(1))
1346
+ # Split this into two halves, assuming that half the sequence is reused each time
1347
+ perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
1348
+ # Permute the two halves such that they do not cross over
1349
+ perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
1350
+ # Flatten this out into the desired permuted factorisation order
1351
+ perm_index = torch.flatten(perm_index.transpose(0, 1))
1352
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1353
+ # smallest index (-1) so that:
1354
+ # (1) They can be seen by all other positions
1355
+ # (2) They cannot see masked positions, so there won't be information leak
1356
+ perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
1357
+ # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
1358
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1359
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1360
+ perm_mask[i] = (
1361
+ perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
1362
+ ) & masked_indices[i]
1363
+
1364
+ return inputs.long(), perm_mask, target_mapping, labels.long()
1365
+
1366
+ def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1367
+ """
1368
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1369
+
1370
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1371
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1372
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1373
+ masked
1374
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1375
+ span_length]` and mask tokens `start_index:start_index + span_length`
1376
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1377
+ sequence to be processed), repeat from Step 1.
1378
+ """
1379
+ import tensorflow as tf
1380
+
1381
+ if self.tokenizer.mask_token is None:
1382
+ raise ValueError(
1383
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1384
+ " Please add a mask token if you want to use this tokenizer."
1385
+ )
1386
+
1387
+ if tf.shape(inputs)[1] % 2 != 0:
1388
+ raise ValueError(
1389
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1390
+ " relevant comments in source code for details."
1391
+ )
1392
+
1393
+ labels = tf.identity(inputs)
1394
+ # Creating the mask and target_mapping tensors
1395
+ masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
1396
+ labels_shape = tf.shape(labels)
1397
+ target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
1398
+
1399
+ for i in range(len(labels)):
1400
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1401
+ cur_len = 0
1402
+ max_len = tf.shape(labels)[1]
1403
+
1404
+ while cur_len < max_len:
1405
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1406
+ span_length = randint(1, self.max_span_length + 1)
1407
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1408
+ context_length = int(span_length / self.plm_probability)
1409
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1410
+ start_index = cur_len + randint(0, context_length - span_length + 1)
1411
+ masked_indices[i, start_index : start_index + span_length] = 1
1412
+ # Set `cur_len = cur_len + context_length`
1413
+ cur_len += context_length
1414
+
1415
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1416
+ # the i-th prediction corresponds to the i-th token.
1417
+ target_mapping[i] = np.eye(labels_shape[1])
1418
+ masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
1419
+ target_mapping = tf.convert_to_tensor(target_mapping)
1420
+ special_tokens_mask = tf.convert_to_tensor(
1421
+ [
1422
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
1423
+ for val in labels.numpy().tolist()
1424
+ ],
1425
+ )
1426
+ special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
1427
+ masked_indices = masked_indices & ~special_tokens_mask
1428
+ if self.tokenizer._pad_token is not None:
1429
+ padding_mask = labels == self.tokenizer.pad_token_id
1430
+ masked_indices = masked_indices & ~padding_mask
1431
+
1432
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1433
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1434
+
1435
+ inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
1436
+ labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
1437
+
1438
+ perm_mask = []
1439
+
1440
+ for i in range(len(labels)):
1441
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1442
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1443
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1444
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1445
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1446
+ # This requires that the sequence length be even.
1447
+
1448
+ # Create a linear factorisation order
1449
+ # tf.range is the equivalent of torch.arange
1450
+ perm_index = tf.range(labels_shape[1])
1451
+ # Split this into two halves, assuming that half the sequence is reused each time
1452
+ perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
1453
+ # Permute the two halves such that they do not cross over
1454
+ perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
1455
+ # Flatten this out into the desired permuted factorisation order
1456
+ perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
1457
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1458
+ # smallest index (-1) so that:
1459
+ # (1) They can be seen by all other positions
1460
+ # (2) They cannot see masked positions, so there won't be information leak
1461
+ perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
1462
+ # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
1463
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1464
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1465
+ perm_mask.append(
1466
+ (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
1467
+ & masked_indices[i]
1468
+ )
1469
+ perm_mask = tf.stack(perm_mask, axis=0)
1470
+
1471
+ return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
1472
+
1473
+ def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
1474
+ """
1475
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
1476
+
1477
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1478
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1479
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
1480
+ masked
1481
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
1482
+ span_length]` and mask tokens `start_index:start_index + span_length`
1483
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
1484
+ sequence to be processed), repeat from Step 1.
1485
+ """
1486
+ if self.tokenizer.mask_token is None:
1487
+ raise ValueError(
1488
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
1489
+ " Please add a mask token if you want to use this tokenizer."
1490
+ )
1491
+
1492
+ if inputs.shape[1] % 2 != 0:
1493
+ raise ValueError(
1494
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
1495
+ " relevant comments in source code for details."
1496
+ )
1497
+
1498
+ labels = np.copy(inputs)
1499
+ # Creating the mask and target_mapping tensors
1500
+ masked_indices = np.full(labels.shape, 0, dtype=bool)
1501
+ target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
1502
+
1503
+ for i in range(labels.shape[0]):
1504
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1505
+ cur_len = 0
1506
+ max_len = labels.shape[1]
1507
+
1508
+ while cur_len < max_len:
1509
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
1510
+ span_length = randint(1, self.max_span_length + 1)
1511
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
1512
+ context_length = int(span_length / self.plm_probability)
1513
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
1514
+ start_index = cur_len + randint(0, context_length - span_length + 1)
1515
+ masked_indices[i, start_index : start_index + span_length] = 1
1516
+ # Set `cur_len = cur_len + context_length`
1517
+ cur_len += context_length
1518
+
1519
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
1520
+ # the i-th prediction corresponds to the i-th token.
1521
+ target_mapping[i] = np.eye(labels.shape[1])
1522
+
1523
+ special_tokens_mask = np.array(
1524
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
1525
+ dtype=bool,
1526
+ )
1527
+ masked_indices[special_tokens_mask] = 0
1528
+ if self.tokenizer._pad_token is not None:
1529
+ padding_mask = labels == self.tokenizer.pad_token_id
1530
+ masked_indices[padding_mask] = 0.0
1531
+
1532
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
1533
+ non_func_mask = ~(padding_mask | special_tokens_mask)
1534
+
1535
+ inputs[masked_indices] = self.tokenizer.mask_token_id
1536
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
1537
+
1538
+ perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
1539
+
1540
+ for i in range(labels.shape[0]):
1541
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
1542
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
1543
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
1544
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
1545
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
1546
+ # This requires that the sequence length be even.
1547
+
1548
+ # Create a linear factorisation order
1549
+ perm_index = np.arange(labels.shape[1])
1550
+ # Split this into two halves, assuming that half the sequence is reused each time
1551
+ perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
1552
+ # Permute the two halves such that they do not cross over
1553
+ np.random.shuffle(perm_index)
1554
+ # Flatten this out into the desired permuted factorisation order
1555
+ perm_index = perm_index.T.flatten()
1556
+ # Set the permutation indices of non-masked (non-functional) tokens to the
1557
+ # smallest index (-1) so that:
1558
+ # (1) They can be seen by all other positions
1559
+ # (2) They cannot see masked positions, so there won't be information leak
1560
+ perm_index[~masked_indices[i] & non_func_mask[i]] = -1
1561
+ # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
1562
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
1563
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
1564
+ perm_mask[i] = (
1565
+ perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
1566
+ ) & masked_indices[i]
1567
+
1568
+ return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
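To make the span-sampling arithmetic described in the docstrings above concrete, here is a hedged sketch with illustrative numbers and an assumed XLNet checkpoint name (neither is part of this commit):

# With plm_probability = 1/6 and a sampled span_length = 3:
#   context_length = int(span_length / plm_probability) = int(3 * 6) = 18
# so roughly one 3-token span is masked per 18-token window, i.e. about 1/6 of tokens overall.

from transformers import DataCollatorForPermutationLanguageModeling, XLNetTokenizerFast

tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
collator = DataCollatorForPermutationLanguageModeling(tokenizer=tokenizer, plm_probability=1 / 6, max_span_length=5)
# Remember: this collator requires even sequence lengths (see the ValueError raised above).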
evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__init__.py ADDED
@@ -0,0 +1,98 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at
4
+ #
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ import warnings
14
+
15
+ from ...utils import is_sklearn_available, requires_backends
16
+
17
+
18
+ if is_sklearn_available():
19
+ from scipy.stats import pearsonr, spearmanr
20
+ from sklearn.metrics import f1_score, matthews_corrcoef
21
+
22
+
23
+ DEPRECATION_WARNING = (
24
+ "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
25
+ "library. You can have a look at this example script for pointers: "
26
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
27
+ )
28
+
29
+
30
+ def simple_accuracy(preds, labels):
31
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
32
+ requires_backends(simple_accuracy, "sklearn")
33
+ return (preds == labels).mean()
34
+
35
+
36
+ def acc_and_f1(preds, labels):
37
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
38
+ requires_backends(acc_and_f1, "sklearn")
39
+ acc = simple_accuracy(preds, labels)
40
+ f1 = f1_score(y_true=labels, y_pred=preds)
41
+ return {
42
+ "acc": acc,
43
+ "f1": f1,
44
+ "acc_and_f1": (acc + f1) / 2,
45
+ }
46
+
47
+
48
+ def pearson_and_spearman(preds, labels):
49
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
50
+ requires_backends(pearson_and_spearman, "sklearn")
51
+ pearson_corr = pearsonr(preds, labels)[0]
52
+ spearman_corr = spearmanr(preds, labels)[0]
53
+ return {
54
+ "pearson": pearson_corr,
55
+ "spearmanr": spearman_corr,
56
+ "corr": (pearson_corr + spearman_corr) / 2,
57
+ }
58
+
59
+
60
+ def glue_compute_metrics(task_name, preds, labels):
61
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
62
+ requires_backends(glue_compute_metrics, "sklearn")
63
+ assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
64
+ if task_name == "cola":
65
+ return {"mcc": matthews_corrcoef(labels, preds)}
66
+ elif task_name == "sst-2":
67
+ return {"acc": simple_accuracy(preds, labels)}
68
+ elif task_name == "mrpc":
69
+ return acc_and_f1(preds, labels)
70
+ elif task_name == "sts-b":
71
+ return pearson_and_spearman(preds, labels)
72
+ elif task_name == "qqp":
73
+ return acc_and_f1(preds, labels)
74
+ elif task_name == "mnli":
75
+ return {"mnli/acc": simple_accuracy(preds, labels)}
76
+ elif task_name == "mnli-mm":
77
+ return {"mnli-mm/acc": simple_accuracy(preds, labels)}
78
+ elif task_name == "qnli":
79
+ return {"acc": simple_accuracy(preds, labels)}
80
+ elif task_name == "rte":
81
+ return {"acc": simple_accuracy(preds, labels)}
82
+ elif task_name == "wnli":
83
+ return {"acc": simple_accuracy(preds, labels)}
84
+ elif task_name == "hans":
85
+ return {"acc": simple_accuracy(preds, labels)}
86
+ else:
87
+ raise KeyError(task_name)
88
+
89
+
90
+ def xnli_compute_metrics(task_name, preds, labels):
91
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
92
+ requires_backends(xnli_compute_metrics, "sklearn")
93
+ if len(preds) != len(labels):
94
+ raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
95
+ if task_name == "xnli":
96
+ return {"acc": simple_accuracy(preds, labels)}
97
+ else:
98
+ raise KeyError(task_name)
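A minimal sketch of calling these (deprecated) helpers, assuming scikit-learn and scipy are installed; the arrays are toy values chosen for illustration:

import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))
# -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775} (plus a FutureWarning pointing to the 🤗 Evaluate library)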
evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.54 kB).
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py ADDED
@@ -0,0 +1,780 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Very heavily inspired by the official evaluation script for SQuAD version 2.0, which was modified by the XLNet authors to
16
+ update `find_best_threshold` scripts for SQuAD V2.0
17
+
18
+ In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an
19
+ additional na_prob.json file is provided. This file is expected to map question IDs to the model's predicted
20
+ probability that a question is unanswerable.
21
+ """
22
+
23
+
24
+ import collections
25
+ import json
26
+ import math
27
+ import re
28
+ import string
29
+
30
+ from ...models.bert import BasicTokenizer
31
+ from ...utils import logging
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def normalize_answer(s):
38
+ """Lower text and remove punctuation, articles and extra whitespace."""
39
+
40
+ def remove_articles(text):
41
+ regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
42
+ return re.sub(regex, " ", text)
43
+
44
+ def white_space_fix(text):
45
+ return " ".join(text.split())
46
+
47
+ def remove_punc(text):
48
+ exclude = set(string.punctuation)
49
+ return "".join(ch for ch in text if ch not in exclude)
50
+
51
+ def lower(text):
52
+ return text.lower()
53
+
54
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
55
+
56
+
57
+ def get_tokens(s):
58
+ if not s:
59
+ return []
60
+ return normalize_answer(s).split()
61
+
62
+
63
+ def compute_exact(a_gold, a_pred):
64
+ return int(normalize_answer(a_gold) == normalize_answer(a_pred))
65
+
66
+
67
+ def compute_f1(a_gold, a_pred):
68
+ gold_toks = get_tokens(a_gold)
69
+ pred_toks = get_tokens(a_pred)
70
+ common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
71
+ num_same = sum(common.values())
72
+ if len(gold_toks) == 0 or len(pred_toks) == 0:
73
+ # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
74
+ return int(gold_toks == pred_toks)
75
+ if num_same == 0:
76
+ return 0
77
+ precision = 1.0 * num_same / len(pred_toks)
78
+ recall = 1.0 * num_same / len(gold_toks)
79
+ f1 = (2 * precision * recall) / (precision + recall)
80
+ return f1
81
+
82
+
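A quick numeric check of the token-overlap F1 defined above (illustrative values, not part of the file); note that `normalize_answer` drops articles before tokenization:

# gold = "the cat sat"  -> normalized tokens ["cat", "sat"]        ("the" is removed as an article)
# pred = "cat sat down" -> normalized tokens ["cat", "sat", "down"]
# num_same = 2, precision = 2/3, recall = 2/2 = 1.0
# F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8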
83
+ def get_raw_scores(examples, preds):
84
+ """
85
+ Computes the exact and f1 scores from the examples and the model predictions
86
+ """
87
+ exact_scores = {}
88
+ f1_scores = {}
89
+
90
+ for example in examples:
91
+ qas_id = example.qas_id
92
+ gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
93
+
94
+ if not gold_answers:
95
+ # For unanswerable questions, only correct answer is empty string
96
+ gold_answers = [""]
97
+
98
+ if qas_id not in preds:
99
+ print(f"Missing prediction for {qas_id}")
100
+ continue
101
+
102
+ prediction = preds[qas_id]
103
+ exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
104
+ f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
105
+
106
+ return exact_scores, f1_scores
107
+
108
+
109
+ def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
110
+ new_scores = {}
111
+ for qid, s in scores.items():
112
+ pred_na = na_probs[qid] > na_prob_thresh
113
+ if pred_na:
114
+ new_scores[qid] = float(not qid_to_has_ans[qid])
115
+ else:
116
+ new_scores[qid] = s
117
+ return new_scores
118
+
119
+
120
+ def make_eval_dict(exact_scores, f1_scores, qid_list=None):
121
+ if not qid_list:
122
+ total = len(exact_scores)
123
+ return collections.OrderedDict(
124
+ [
125
+ ("exact", 100.0 * sum(exact_scores.values()) / total),
126
+ ("f1", 100.0 * sum(f1_scores.values()) / total),
127
+ ("total", total),
128
+ ]
129
+ )
130
+ else:
131
+ total = len(qid_list)
132
+ return collections.OrderedDict(
133
+ [
134
+ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
135
+ ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
136
+ ("total", total),
137
+ ]
138
+ )
139
+
140
+
141
+ def merge_eval(main_eval, new_eval, prefix):
142
+ for k in new_eval:
143
+ main_eval[f"{prefix}_{k}"] = new_eval[k]
144
+
145
+
146
+ def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
147
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
148
+ cur_score = num_no_ans
149
+ best_score = cur_score
150
+ best_thresh = 0.0
151
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
152
+ for i, qid in enumerate(qid_list):
153
+ if qid not in scores:
154
+ continue
155
+ if qid_to_has_ans[qid]:
156
+ diff = scores[qid]
157
+ else:
158
+ if preds[qid]:
159
+ diff = -1
160
+ else:
161
+ diff = 0
162
+ cur_score += diff
163
+ if cur_score > best_score:
164
+ best_score = cur_score
165
+ best_thresh = na_probs[qid]
166
+
167
+ has_ans_score, has_ans_cnt = 0, 0
168
+ for qid in qid_list:
169
+ if not qid_to_has_ans[qid]:
170
+ continue
171
+ has_ans_cnt += 1
172
+
173
+ if qid not in scores:
174
+ continue
175
+ has_ans_score += scores[qid]
176
+
177
+ return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
178
+
179
+
180
+ def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
181
+ best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
182
+ best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
183
+ main_eval["best_exact"] = best_exact
184
+ main_eval["best_exact_thresh"] = exact_thresh
185
+ main_eval["best_f1"] = best_f1
186
+ main_eval["best_f1_thresh"] = f1_thresh
187
+ main_eval["has_ans_exact"] = has_ans_exact
188
+ main_eval["has_ans_f1"] = has_ans_f1
189
+
190
+
191
+ def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
192
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
193
+ cur_score = num_no_ans
194
+ best_score = cur_score
195
+ best_thresh = 0.0
196
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
197
+ for _, qid in enumerate(qid_list):
198
+ if qid not in scores:
199
+ continue
200
+ if qid_to_has_ans[qid]:
201
+ diff = scores[qid]
202
+ else:
203
+ if preds[qid]:
204
+ diff = -1
205
+ else:
206
+ diff = 0
207
+ cur_score += diff
208
+ if cur_score > best_score:
209
+ best_score = cur_score
210
+ best_thresh = na_probs[qid]
211
+ return 100.0 * best_score / len(scores), best_thresh
212
+
213
+
214
+ def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
215
+ best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
216
+ best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
217
+
218
+ main_eval["best_exact"] = best_exact
219
+ main_eval["best_exact_thresh"] = exact_thresh
220
+ main_eval["best_f1"] = best_f1
221
+ main_eval["best_f1_thresh"] = f1_thresh
222
+
223
+
224
+ def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
225
+ qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
226
+ has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
227
+ no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
228
+
229
+ if no_answer_probs is None:
230
+ no_answer_probs = {k: 0.0 for k in preds}
231
+
232
+ exact, f1 = get_raw_scores(examples, preds)
233
+
234
+ exact_threshold = apply_no_ans_threshold(
235
+ exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
236
+ )
237
+ f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
238
+
239
+ evaluation = make_eval_dict(exact_threshold, f1_threshold)
240
+
241
+ if has_answer_qids:
242
+ has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
243
+ merge_eval(evaluation, has_ans_eval, "HasAns")
244
+
245
+ if no_answer_qids:
246
+ no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
247
+ merge_eval(evaluation, no_ans_eval, "NoAns")
248
+
249
+ if no_answer_probs:
250
+ find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
251
+
252
+ return evaluation
253
+
254
+
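For orientation, the shape of the dictionary returned by `squad_evaluate` above, summarized from the code (keys only; values depend on the data):

# squad_evaluate(examples, preds) -> OrderedDict containing
#   "exact", "f1", "total",
#   "HasAns_exact", "HasAns_f1", "HasAns_total"   (if any answerable questions),
#   "NoAns_exact",  "NoAns_f1",  "NoAns_total"    (if any unanswerable questions),
#   "best_exact", "best_exact_thresh", "best_f1", "best_f1_thresh"  (when no_answer_probs is truthy)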
255
+ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
256
+ """Project the tokenized prediction back to the original text."""
257
+
258
+ # When we created the data, we kept track of the alignment between original
259
+ # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
260
+ # now `orig_text` contains the span of our original text corresponding to the
261
+ # span that we predicted.
262
+ #
263
+ # However, `orig_text` may contain extra characters that we don't want in
264
+ # our prediction.
265
+ #
266
+ # For example, let's say:
267
+ # pred_text = steve smith
268
+ # orig_text = Steve Smith's
269
+ #
270
+ # We don't want to return `orig_text` because it contains the extra "'s".
271
+ #
272
+ # We don't want to return `pred_text` because it's already been normalized
273
+ # (the SQuAD eval script also does punctuation stripping/lower casing but
274
+ # our tokenizer does additional normalization like stripping accent
275
+ # characters).
276
+ #
277
+ # What we really want to return is "Steve Smith".
278
+ #
279
+ # Therefore, we have to apply a semi-complicated alignment heuristic between
280
+ # `pred_text` and `orig_text` to get a character-to-character alignment. This
281
+ # can fail in certain cases in which case we just return `orig_text`.
282
+
283
+ def _strip_spaces(text):
284
+ ns_chars = []
285
+ ns_to_s_map = collections.OrderedDict()
286
+ for i, c in enumerate(text):
287
+ if c == " ":
288
+ continue
289
+ ns_to_s_map[len(ns_chars)] = i
290
+ ns_chars.append(c)
291
+ ns_text = "".join(ns_chars)
292
+ return (ns_text, ns_to_s_map)
293
+
294
+ # We first tokenize `orig_text`, strip whitespace from the result
295
+ # and `pred_text`, and check if they are the same length. If they are
296
+ # NOT the same length, the heuristic has failed. If they are the same
297
+ # length, we assume the characters are one-to-one aligned.
298
+ tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
299
+
300
+ tok_text = " ".join(tokenizer.tokenize(orig_text))
301
+
302
+ start_position = tok_text.find(pred_text)
303
+ if start_position == -1:
304
+ if verbose_logging:
305
+ logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
306
+ return orig_text
307
+ end_position = start_position + len(pred_text) - 1
308
+
309
+ (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
310
+ (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
311
+
312
+ if len(orig_ns_text) != len(tok_ns_text):
313
+ if verbose_logging:
314
+ logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'")
315
+ return orig_text
316
+
317
+ # We then project the characters in `pred_text` back to `orig_text` using
318
+ # the character-to-character alignment.
319
+ tok_s_to_ns_map = {}
320
+ for i, tok_index in tok_ns_to_s_map.items():
321
+ tok_s_to_ns_map[tok_index] = i
322
+
323
+ orig_start_position = None
324
+ if start_position in tok_s_to_ns_map:
325
+ ns_start_position = tok_s_to_ns_map[start_position]
326
+ if ns_start_position in orig_ns_to_s_map:
327
+ orig_start_position = orig_ns_to_s_map[ns_start_position]
328
+
329
+ if orig_start_position is None:
330
+ if verbose_logging:
331
+ logger.info("Couldn't map start position")
332
+ return orig_text
333
+
334
+ orig_end_position = None
335
+ if end_position in tok_s_to_ns_map:
336
+ ns_end_position = tok_s_to_ns_map[end_position]
337
+ if ns_end_position in orig_ns_to_s_map:
338
+ orig_end_position = orig_ns_to_s_map[ns_end_position]
339
+
340
+ if orig_end_position is None:
341
+ if verbose_logging:
342
+ logger.info("Couldn't map end position")
343
+ return orig_text
344
+
345
+ output_text = orig_text[orig_start_position : (orig_end_position + 1)]
346
+ return output_text
347
+
348
+
349
+ def _get_best_indexes(logits, n_best_size):
350
+ """Get the n-best logits from a list."""
351
+ index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
352
+
353
+ best_indexes = []
354
+ for i in range(len(index_and_score)):
355
+ if i >= n_best_size:
356
+ break
357
+ best_indexes.append(index_and_score[i][0])
358
+ return best_indexes
359
+
360
+
361
+ def _compute_softmax(scores):
362
+ """Compute softmax probability over raw logits."""
363
+ if not scores:
364
+ return []
365
+
366
+ max_score = None
367
+ for score in scores:
368
+ if max_score is None or score > max_score:
369
+ max_score = score
370
+
371
+ exp_scores = []
372
+ total_sum = 0.0
373
+ for score in scores:
374
+ x = math.exp(score - max_score)
375
+ exp_scores.append(x)
376
+ total_sum += x
377
+
378
+ probs = []
379
+ for score in exp_scores:
380
+ probs.append(score / total_sum)
381
+ return probs
382
+
383
+
384
+ def compute_predictions_logits(
385
+ all_examples,
386
+ all_features,
387
+ all_results,
388
+ n_best_size,
389
+ max_answer_length,
390
+ do_lower_case,
391
+ output_prediction_file,
392
+ output_nbest_file,
393
+ output_null_log_odds_file,
394
+ verbose_logging,
395
+ version_2_with_negative,
396
+ null_score_diff_threshold,
397
+ tokenizer,
398
+ ):
399
+ """Write final predictions to the json file and log-odds of null if needed."""
400
+ if output_prediction_file:
401
+ logger.info(f"Writing predictions to: {output_prediction_file}")
402
+ if output_nbest_file:
403
+ logger.info(f"Writing nbest to: {output_nbest_file}")
404
+ if output_null_log_odds_file and version_2_with_negative:
405
+ logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")
406
+
407
+ example_index_to_features = collections.defaultdict(list)
408
+ for feature in all_features:
409
+ example_index_to_features[feature.example_index].append(feature)
410
+
411
+ unique_id_to_result = {}
412
+ for result in all_results:
413
+ unique_id_to_result[result.unique_id] = result
414
+
415
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
416
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
417
+ )
418
+
419
+ all_predictions = collections.OrderedDict()
420
+ all_nbest_json = collections.OrderedDict()
421
+ scores_diff_json = collections.OrderedDict()
422
+
423
+ for example_index, example in enumerate(all_examples):
424
+ features = example_index_to_features[example_index]
425
+
426
+ prelim_predictions = []
427
+ # keep track of the minimum score of null start+end of position 0
428
+ score_null = 1000000 # large and positive
429
+ min_null_feature_index = 0 # the paragraph slice with min null score
430
+ null_start_logit = 0 # the start logit at the slice with min null score
431
+ null_end_logit = 0 # the end logit at the slice with min null score
432
+ for feature_index, feature in enumerate(features):
433
+ result = unique_id_to_result[feature.unique_id]
434
+ start_indexes = _get_best_indexes(result.start_logits, n_best_size)
435
+ end_indexes = _get_best_indexes(result.end_logits, n_best_size)
436
+ # if we could have irrelevant answers, get the min score of irrelevant
437
+ if version_2_with_negative:
438
+ feature_null_score = result.start_logits[0] + result.end_logits[0]
439
+ if feature_null_score < score_null:
440
+ score_null = feature_null_score
441
+ min_null_feature_index = feature_index
442
+ null_start_logit = result.start_logits[0]
443
+ null_end_logit = result.end_logits[0]
444
+ for start_index in start_indexes:
445
+ for end_index in end_indexes:
446
+ # We could hypothetically create invalid predictions, e.g., predict
447
+ # that the start of the span is in the question. We throw out all
448
+ # invalid predictions.
449
+ if start_index >= len(feature.tokens):
450
+ continue
451
+ if end_index >= len(feature.tokens):
452
+ continue
453
+ if start_index not in feature.token_to_orig_map:
454
+ continue
455
+ if end_index not in feature.token_to_orig_map:
456
+ continue
457
+ if not feature.token_is_max_context.get(start_index, False):
458
+ continue
459
+ if end_index < start_index:
460
+ continue
461
+ length = end_index - start_index + 1
462
+ if length > max_answer_length:
463
+ continue
464
+ prelim_predictions.append(
465
+ _PrelimPrediction(
466
+ feature_index=feature_index,
467
+ start_index=start_index,
468
+ end_index=end_index,
469
+ start_logit=result.start_logits[start_index],
470
+ end_logit=result.end_logits[end_index],
471
+ )
472
+ )
473
+ if version_2_with_negative:
474
+ prelim_predictions.append(
475
+ _PrelimPrediction(
476
+ feature_index=min_null_feature_index,
477
+ start_index=0,
478
+ end_index=0,
479
+ start_logit=null_start_logit,
480
+ end_logit=null_end_logit,
481
+ )
482
+ )
483
+ prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
484
+
485
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
486
+ "NbestPrediction", ["text", "start_logit", "end_logit"]
487
+ )
488
+
489
+ seen_predictions = {}
490
+ nbest = []
491
+ for pred in prelim_predictions:
492
+ if len(nbest) >= n_best_size:
493
+ break
494
+ feature = features[pred.feature_index]
495
+ if pred.start_index > 0: # this is a non-null prediction
496
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
497
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
498
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
499
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
500
+
501
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
502
+
503
+ # tok_text = " ".join(tok_tokens)
504
+ #
505
+ # # De-tokenize WordPieces that have been split off.
506
+ # tok_text = tok_text.replace(" ##", "")
507
+ # tok_text = tok_text.replace("##", "")
508
+
509
+ # Clean whitespace
510
+ tok_text = tok_text.strip()
511
+ tok_text = " ".join(tok_text.split())
512
+ orig_text = " ".join(orig_tokens)
513
+
514
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
515
+ if final_text in seen_predictions:
516
+ continue
517
+
518
+ seen_predictions[final_text] = True
519
+ else:
520
+ final_text = ""
521
+ seen_predictions[final_text] = True
522
+
523
+ nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
524
+ # if we didn't include the empty option in the n-best, include it
525
+ if version_2_with_negative:
526
+ if "" not in seen_predictions:
527
+ nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
528
+
529
+ # In very rare edge cases we could only have single null prediction.
530
+ # So we just create a nonce prediction in this case to avoid failure.
531
+ if len(nbest) == 1:
532
+ nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
533
+
534
+ # In very rare edge cases we could have no valid predictions. So we
535
+ # just create a nonce prediction in this case to avoid failure.
536
+ if not nbest:
537
+ nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
538
+
539
+ if len(nbest) < 1:
540
+ raise ValueError("No valid predictions")
541
+
542
+ total_scores = []
543
+ best_non_null_entry = None
544
+ for entry in nbest:
545
+ total_scores.append(entry.start_logit + entry.end_logit)
546
+ if not best_non_null_entry:
547
+ if entry.text:
548
+ best_non_null_entry = entry
549
+
550
+ probs = _compute_softmax(total_scores)
551
+
552
+ nbest_json = []
553
+ for i, entry in enumerate(nbest):
554
+ output = collections.OrderedDict()
555
+ output["text"] = entry.text
556
+ output["probability"] = probs[i]
557
+ output["start_logit"] = entry.start_logit
558
+ output["end_logit"] = entry.end_logit
559
+ nbest_json.append(output)
560
+
561
+ if len(nbest_json) < 1:
562
+ raise ValueError("No valid predictions")
563
+
564
+ if not version_2_with_negative:
565
+ all_predictions[example.qas_id] = nbest_json[0]["text"]
566
+ else:
567
+ # predict "" iff the null score - the score of best non-null > threshold
568
+ score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
569
+ scores_diff_json[example.qas_id] = score_diff
570
+ if score_diff > null_score_diff_threshold:
571
+ all_predictions[example.qas_id] = ""
572
+ else:
573
+ all_predictions[example.qas_id] = best_non_null_entry.text
574
+ all_nbest_json[example.qas_id] = nbest_json
575
+
576
+ if output_prediction_file:
577
+ with open(output_prediction_file, "w") as writer:
578
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
579
+
580
+ if output_nbest_file:
581
+ with open(output_nbest_file, "w") as writer:
582
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
583
+
584
+ if output_null_log_odds_file and version_2_with_negative:
585
+ with open(output_null_log_odds_file, "w") as writer:
586
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
587
+
588
+ return all_predictions
589
+
590
+
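A rough usage sketch for the BERT-style prediction writer that ends above (`compute_predictions_logits` in this module); this is not part of the original diff. It is typically called after the evaluation loop with the examples and features produced by `squad_convert_examples_to_features` plus one result per feature carrying `unique_id`, `start_logits` and `end_logits`. The names `eval_examples`, `eval_features`, `all_results` and `tokenizer` below are placeholders assumed to come from that loop.

```python
# Hypothetical post-evaluation call; all inputs are assumed to exist already.
predictions = compute_predictions_logits(
    eval_examples,
    eval_features,
    all_results,
    n_best_size=20,
    max_answer_length=30,
    do_lower_case=True,
    output_prediction_file="predictions.json",
    output_nbest_file="nbest_predictions.json",
    output_null_log_odds_file="null_odds.json",
    verbose_logging=False,
    version_2_with_negative=True,  # SQuAD v2 style: empty answers allowed
    null_score_diff_threshold=0.0,
    tokenizer=tokenizer,
)
```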
591
+ def compute_predictions_log_probs(
592
+ all_examples,
593
+ all_features,
594
+ all_results,
595
+ n_best_size,
596
+ max_answer_length,
597
+ output_prediction_file,
598
+ output_nbest_file,
599
+ output_null_log_odds_file,
600
+ start_n_top,
601
+ end_n_top,
602
+ version_2_with_negative,
603
+ tokenizer,
604
+ verbose_logging,
605
+ ):
606
+ """
607
+ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of
608
+ null if needed.
609
+
610
+ Requires utils_squad_evaluate.py
611
+ """
612
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
613
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
614
+ )
615
+
616
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
617
+ "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
618
+ )
619
+
620
+ logger.info(f"Writing predictions to: {output_prediction_file}")
621
+
622
+ example_index_to_features = collections.defaultdict(list)
623
+ for feature in all_features:
624
+ example_index_to_features[feature.example_index].append(feature)
625
+
626
+ unique_id_to_result = {}
627
+ for result in all_results:
628
+ unique_id_to_result[result.unique_id] = result
629
+
630
+ all_predictions = collections.OrderedDict()
631
+ all_nbest_json = collections.OrderedDict()
632
+ scores_diff_json = collections.OrderedDict()
633
+
634
+ for example_index, example in enumerate(all_examples):
635
+ features = example_index_to_features[example_index]
636
+
637
+ prelim_predictions = []
638
+ # keep track of the minimum score of null start+end of position 0
639
+ score_null = 1000000 # large and positive
640
+
641
+ for feature_index, feature in enumerate(features):
642
+ result = unique_id_to_result[feature.unique_id]
643
+
644
+ cur_null_score = result.cls_logits
645
+
646
+ # if we could have irrelevant answers, get the min score of irrelevant
647
+ score_null = min(score_null, cur_null_score)
648
+
649
+ for i in range(start_n_top):
650
+ for j in range(end_n_top):
651
+ start_log_prob = result.start_logits[i]
652
+ start_index = result.start_top_index[i]
653
+
654
+ j_index = i * end_n_top + j
655
+
656
+ end_log_prob = result.end_logits[j_index]
657
+ end_index = result.end_top_index[j_index]
658
+
659
+ # We could hypothetically create invalid predictions, e.g., predict
660
+ # that the start of the span is in the question. We throw out all
661
+ # invalid predictions.
662
+ if start_index >= feature.paragraph_len - 1:
663
+ continue
664
+ if end_index >= feature.paragraph_len - 1:
665
+ continue
666
+
667
+ if not feature.token_is_max_context.get(start_index, False):
668
+ continue
669
+ if end_index < start_index:
670
+ continue
671
+ length = end_index - start_index + 1
672
+ if length > max_answer_length:
673
+ continue
674
+
675
+ prelim_predictions.append(
676
+ _PrelimPrediction(
677
+ feature_index=feature_index,
678
+ start_index=start_index,
679
+ end_index=end_index,
680
+ start_log_prob=start_log_prob,
681
+ end_log_prob=end_log_prob,
682
+ )
683
+ )
684
+
685
+ prelim_predictions = sorted(
686
+ prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
687
+ )
688
+
689
+ seen_predictions = {}
690
+ nbest = []
691
+ for pred in prelim_predictions:
692
+ if len(nbest) >= n_best_size:
693
+ break
694
+ feature = features[pred.feature_index]
695
+
696
+ # XLNet un-tokenizer
697
+ # Let's keep it simple for now and see if we need all this later.
698
+ #
699
+ # tok_start_to_orig_index = feature.tok_start_to_orig_index
700
+ # tok_end_to_orig_index = feature.tok_end_to_orig_index
701
+ # start_orig_pos = tok_start_to_orig_index[pred.start_index]
702
+ # end_orig_pos = tok_end_to_orig_index[pred.end_index]
703
+ # paragraph_text = example.paragraph_text
704
+ # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
705
+
706
+ # Previously used Bert untokenizer
707
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
708
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
709
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
710
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
711
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
712
+
713
+ # Clean whitespace
714
+ tok_text = tok_text.strip()
715
+ tok_text = " ".join(tok_text.split())
716
+ orig_text = " ".join(orig_tokens)
717
+
718
+ if hasattr(tokenizer, "do_lower_case"):
719
+ do_lower_case = tokenizer.do_lower_case
720
+ else:
721
+ do_lower_case = tokenizer.do_lowercase_and_remove_accent
722
+
723
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
724
+
725
+ if final_text in seen_predictions:
726
+ continue
727
+
728
+ seen_predictions[final_text] = True
729
+
730
+ nbest.append(
731
+ _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
732
+ )
733
+
734
+ # In very rare edge cases we could have no valid predictions. So we
735
+ # just create a nonce prediction in this case to avoid failure.
736
+ if not nbest:
737
+ nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
738
+
739
+ total_scores = []
740
+ best_non_null_entry = None
741
+ for entry in nbest:
742
+ total_scores.append(entry.start_log_prob + entry.end_log_prob)
743
+ if not best_non_null_entry:
744
+ best_non_null_entry = entry
745
+
746
+ probs = _compute_softmax(total_scores)
747
+
748
+ nbest_json = []
749
+ for i, entry in enumerate(nbest):
750
+ output = collections.OrderedDict()
751
+ output["text"] = entry.text
752
+ output["probability"] = probs[i]
753
+ output["start_log_prob"] = entry.start_log_prob
754
+ output["end_log_prob"] = entry.end_log_prob
755
+ nbest_json.append(output)
756
+
757
+ if len(nbest_json) < 1:
758
+ raise ValueError("No valid predictions")
759
+ if best_non_null_entry is None:
760
+ raise ValueError("No valid predictions")
761
+
762
+ score_diff = score_null
763
+ scores_diff_json[example.qas_id] = score_diff
764
+ # note(zhiliny): always predict best_non_null_entry
765
+ # and the evaluation script will search for the best threshold
766
+ all_predictions[example.qas_id] = best_non_null_entry.text
767
+
768
+ all_nbest_json[example.qas_id] = nbest_json
769
+
770
+ with open(output_prediction_file, "w") as writer:
771
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
772
+
773
+ with open(output_nbest_file, "w") as writer:
774
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
775
+
776
+ if version_2_with_negative:
777
+ with open(output_null_log_odds_file, "w") as writer:
778
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
779
+
780
+ return all_predictions
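`compute_predictions_log_probs` above is the XLNet/XLM-style counterpart: its result objects must expose `cls_logits`, `start_top_index`/`end_top_index` and the matching top-k start/end log-probabilities. A minimal call sketch with placeholder inputs (not part of the original diff):

```python
# Hypothetical call for XLNet/XLM-style QA heads; eval_examples, eval_features,
# all_results and tokenizer are assumed to come from the caller's evaluation loop.
predictions = compute_predictions_log_probs(
    eval_examples,
    eval_features,
    all_results,
    n_best_size=20,
    max_answer_length=30,
    output_prediction_file="predictions.json",
    output_nbest_file="nbest_predictions.json",
    output_null_log_odds_file="null_odds.json",
    start_n_top=5,
    end_n_top=5,
    version_2_with_negative=True,
    tokenizer=tokenizer,
    verbose_logging=False,
)
```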
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
16
+ from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
17
+ from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
18
+ from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
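For reference, the package `__init__` above defines the public import surface of the processors sub-package; a typical import (assuming the standard package location) looks like:

```python
from transformers.data.processors import (
    SquadV2Processor,
    glue_processors,
    squad_convert_examples_to_features,
)
```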
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (745 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc ADDED
Binary file (20 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/glue.py ADDED
@@ -0,0 +1,643 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ GLUE processors and helpers"""
17
+
18
+ import os
19
+ import warnings
20
+ from dataclasses import asdict
21
+ from enum import Enum
22
+ from typing import List, Optional, Union
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import is_tf_available, logging
26
+ from .utils import DataProcessor, InputExample, InputFeatures
27
+
28
+
29
+ if is_tf_available():
30
+ import tensorflow as tf
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ DEPRECATION_WARNING = (
35
+ "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
36
+ "library. You can have a look at this example script for pointers: "
37
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
38
+ )
39
+
40
+
41
+ def glue_convert_examples_to_features(
42
+ examples: Union[List[InputExample], "tf.data.Dataset"],
43
+ tokenizer: PreTrainedTokenizer,
44
+ max_length: Optional[int] = None,
45
+ task=None,
46
+ label_list=None,
47
+ output_mode=None,
48
+ ):
49
+ """
50
+ Loads a data file into a list of `InputFeatures`
51
+
52
+ Args:
53
+ examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
54
+ tokenizer: Instance of a tokenizer that will tokenize the examples
55
+ max_length: Maximum example length. Defaults to the tokenizer's max_len
56
+ task: GLUE task
57
+ label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
58
+ output_mode: String indicating the output mode. Either `regression` or `classification`
59
+
60
+ Returns:
61
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific
62
+ features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which
63
+ can be fed to the model.
64
+
65
+ """
66
+ warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
67
+ if is_tf_available() and isinstance(examples, tf.data.Dataset):
68
+ if task is None:
69
+ raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
70
+ return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
71
+ return _glue_convert_examples_to_features(
72
+ examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
73
+ )
74
+
75
+
76
+ if is_tf_available():
77
+
78
+ def _tf_glue_convert_examples_to_features(
79
+ examples: tf.data.Dataset,
80
+ tokenizer: PreTrainedTokenizer,
81
+ task=str,
82
+ max_length: Optional[int] = None,
83
+ ) -> tf.data.Dataset:
84
+ """
85
+ Returns:
86
+ A `tf.data.Dataset` containing the task-specific features.
87
+
88
+ """
89
+ processor = glue_processors[task]()
90
+ examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
91
+ features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
92
+ label_type = tf.float32 if task == "sts-b" else tf.int64
93
+
94
+ def gen():
95
+ for ex in features:
96
+ d = {k: v for k, v in asdict(ex).items() if v is not None}
97
+ label = d.pop("label")
98
+ yield (d, label)
99
+
100
+ input_names = tokenizer.model_input_names
101
+
102
+ return tf.data.Dataset.from_generator(
103
+ gen,
104
+ ({k: tf.int32 for k in input_names}, label_type),
105
+ ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
106
+ )
107
+
108
+
109
+ def _glue_convert_examples_to_features(
110
+ examples: List[InputExample],
111
+ tokenizer: PreTrainedTokenizer,
112
+ max_length: Optional[int] = None,
113
+ task=None,
114
+ label_list=None,
115
+ output_mode=None,
116
+ ):
117
+ if max_length is None:
118
+ max_length = tokenizer.model_max_length
119
+
120
+ if task is not None:
121
+ processor = glue_processors[task]()
122
+ if label_list is None:
123
+ label_list = processor.get_labels()
124
+ logger.info(f"Using label list {label_list} for task {task}")
125
+ if output_mode is None:
126
+ output_mode = glue_output_modes[task]
127
+ logger.info(f"Using output mode {output_mode} for task {task}")
128
+
129
+ label_map = {label: i for i, label in enumerate(label_list)}
130
+
131
+ def label_from_example(example: InputExample) -> Union[int, float, None]:
132
+ if example.label is None:
133
+ return None
134
+ if output_mode == "classification":
135
+ return label_map[example.label]
136
+ elif output_mode == "regression":
137
+ return float(example.label)
138
+ raise KeyError(output_mode)
139
+
140
+ labels = [label_from_example(example) for example in examples]
141
+
142
+ batch_encoding = tokenizer(
143
+ [(example.text_a, example.text_b) for example in examples],
144
+ max_length=max_length,
145
+ padding="max_length",
146
+ truncation=True,
147
+ )
148
+
149
+ features = []
150
+ for i in range(len(examples)):
151
+ inputs = {k: batch_encoding[k][i] for k in batch_encoding}
152
+
153
+ feature = InputFeatures(**inputs, label=labels[i])
154
+ features.append(feature)
155
+
156
+ for i, example in enumerate(examples[:5]):
157
+ logger.info("*** Example ***")
158
+ logger.info(f"guid: {example.guid}")
159
+ logger.info(f"features: {features[i]}")
160
+
161
+ return features
162
+
163
+
164
+ class OutputMode(Enum):
165
+ classification = "classification"
166
+ regression = "regression"
167
+
168
+
169
+ class MrpcProcessor(DataProcessor):
170
+ """Processor for the MRPC data set (GLUE version)."""
171
+
172
+ def __init__(self, *args, **kwargs):
173
+ super().__init__(*args, **kwargs)
174
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
175
+
176
+ def get_example_from_tensor_dict(self, tensor_dict):
177
+ """See base class."""
178
+ return InputExample(
179
+ tensor_dict["idx"].numpy(),
180
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
181
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
182
+ str(tensor_dict["label"].numpy()),
183
+ )
184
+
185
+ def get_train_examples(self, data_dir):
186
+ """See base class."""
187
+ logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
188
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
189
+
190
+ def get_dev_examples(self, data_dir):
191
+ """See base class."""
192
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
193
+
194
+ def get_test_examples(self, data_dir):
195
+ """See base class."""
196
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
197
+
198
+ def get_labels(self):
199
+ """See base class."""
200
+ return ["0", "1"]
201
+
202
+ def _create_examples(self, lines, set_type):
203
+ """Creates examples for the training, dev and test sets."""
204
+ examples = []
205
+ for i, line in enumerate(lines):
206
+ if i == 0:
207
+ continue
208
+ guid = f"{set_type}-{i}"
209
+ text_a = line[3]
210
+ text_b = line[4]
211
+ label = None if set_type == "test" else line[0]
212
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
213
+ return examples
214
+
215
+
216
+ class MnliProcessor(DataProcessor):
217
+ """Processor for the MultiNLI data set (GLUE version)."""
218
+
219
+ def __init__(self, *args, **kwargs):
220
+ super().__init__(*args, **kwargs)
221
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
222
+
223
+ def get_example_from_tensor_dict(self, tensor_dict):
224
+ """See base class."""
225
+ return InputExample(
226
+ tensor_dict["idx"].numpy(),
227
+ tensor_dict["premise"].numpy().decode("utf-8"),
228
+ tensor_dict["hypothesis"].numpy().decode("utf-8"),
229
+ str(tensor_dict["label"].numpy()),
230
+ )
231
+
232
+ def get_train_examples(self, data_dir):
233
+ """See base class."""
234
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
235
+
236
+ def get_dev_examples(self, data_dir):
237
+ """See base class."""
238
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
239
+
240
+ def get_test_examples(self, data_dir):
241
+ """See base class."""
242
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")
243
+
244
+ def get_labels(self):
245
+ """See base class."""
246
+ return ["contradiction", "entailment", "neutral"]
247
+
248
+ def _create_examples(self, lines, set_type):
249
+ """Creates examples for the training, dev and test sets."""
250
+ examples = []
251
+ for i, line in enumerate(lines):
252
+ if i == 0:
253
+ continue
254
+ guid = f"{set_type}-{line[0]}"
255
+ text_a = line[8]
256
+ text_b = line[9]
257
+ label = None if set_type.startswith("test") else line[-1]
258
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
259
+ return examples
260
+
261
+
262
+ class MnliMismatchedProcessor(MnliProcessor):
263
+ """Processor for the MultiNLI Mismatched data set (GLUE version)."""
264
+
265
+ def __init__(self, *args, **kwargs):
266
+ super().__init__(*args, **kwargs)
267
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
268
+
269
+ def get_dev_examples(self, data_dir):
270
+ """See base class."""
271
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched")
272
+
273
+ def get_test_examples(self, data_dir):
274
+ """See base class."""
275
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched")
276
+
277
+
278
+ class ColaProcessor(DataProcessor):
279
+ """Processor for the CoLA data set (GLUE version)."""
280
+
281
+ def __init__(self, *args, **kwargs):
282
+ super().__init__(*args, **kwargs)
283
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
284
+
285
+ def get_example_from_tensor_dict(self, tensor_dict):
286
+ """See base class."""
287
+ return InputExample(
288
+ tensor_dict["idx"].numpy(),
289
+ tensor_dict["sentence"].numpy().decode("utf-8"),
290
+ None,
291
+ str(tensor_dict["label"].numpy()),
292
+ )
293
+
294
+ def get_train_examples(self, data_dir):
295
+ """See base class."""
296
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
297
+
298
+ def get_dev_examples(self, data_dir):
299
+ """See base class."""
300
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
301
+
302
+ def get_test_examples(self, data_dir):
303
+ """See base class."""
304
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
305
+
306
+ def get_labels(self):
307
+ """See base class."""
308
+ return ["0", "1"]
309
+
310
+ def _create_examples(self, lines, set_type):
311
+ """Creates examples for the training, dev and test sets."""
312
+ test_mode = set_type == "test"
313
+ if test_mode:
314
+ lines = lines[1:]
315
+ text_index = 1 if test_mode else 3
316
+ examples = []
317
+ for i, line in enumerate(lines):
318
+ guid = f"{set_type}-{i}"
319
+ text_a = line[text_index]
320
+ label = None if test_mode else line[1]
321
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
322
+ return examples
323
+
324
+
325
+ class Sst2Processor(DataProcessor):
326
+ """Processor for the SST-2 data set (GLUE version)."""
327
+
328
+ def __init__(self, *args, **kwargs):
329
+ super().__init__(*args, **kwargs)
330
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
331
+
332
+ def get_example_from_tensor_dict(self, tensor_dict):
333
+ """See base class."""
334
+ return InputExample(
335
+ tensor_dict["idx"].numpy(),
336
+ tensor_dict["sentence"].numpy().decode("utf-8"),
337
+ None,
338
+ str(tensor_dict["label"].numpy()),
339
+ )
340
+
341
+ def get_train_examples(self, data_dir):
342
+ """See base class."""
343
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
344
+
345
+ def get_dev_examples(self, data_dir):
346
+ """See base class."""
347
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
348
+
349
+ def get_test_examples(self, data_dir):
350
+ """See base class."""
351
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
352
+
353
+ def get_labels(self):
354
+ """See base class."""
355
+ return ["0", "1"]
356
+
357
+ def _create_examples(self, lines, set_type):
358
+ """Creates examples for the training, dev and test sets."""
359
+ examples = []
360
+ text_index = 1 if set_type == "test" else 0
361
+ for i, line in enumerate(lines):
362
+ if i == 0:
363
+ continue
364
+ guid = f"{set_type}-{i}"
365
+ text_a = line[text_index]
366
+ label = None if set_type == "test" else line[1]
367
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
368
+ return examples
369
+
370
+
371
+ class StsbProcessor(DataProcessor):
372
+ """Processor for the STS-B data set (GLUE version)."""
373
+
374
+ def __init__(self, *args, **kwargs):
375
+ super().__init__(*args, **kwargs)
376
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
377
+
378
+ def get_example_from_tensor_dict(self, tensor_dict):
379
+ """See base class."""
380
+ return InputExample(
381
+ tensor_dict["idx"].numpy(),
382
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
383
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
384
+ str(tensor_dict["label"].numpy()),
385
+ )
386
+
387
+ def get_train_examples(self, data_dir):
388
+ """See base class."""
389
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
390
+
391
+ def get_dev_examples(self, data_dir):
392
+ """See base class."""
393
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
394
+
395
+ def get_test_examples(self, data_dir):
396
+ """See base class."""
397
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
398
+
399
+ def get_labels(self):
400
+ """See base class."""
401
+ return [None]
402
+
403
+ def _create_examples(self, lines, set_type):
404
+ """Creates examples for the training, dev and test sets."""
405
+ examples = []
406
+ for i, line in enumerate(lines):
407
+ if i == 0:
408
+ continue
409
+ guid = f"{set_type}-{line[0]}"
410
+ text_a = line[7]
411
+ text_b = line[8]
412
+ label = None if set_type == "test" else line[-1]
413
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
414
+ return examples
415
+
416
+
417
+ class QqpProcessor(DataProcessor):
418
+ """Processor for the QQP data set (GLUE version)."""
419
+
420
+ def __init__(self, *args, **kwargs):
421
+ super().__init__(*args, **kwargs)
422
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
423
+
424
+ def get_example_from_tensor_dict(self, tensor_dict):
425
+ """See base class."""
426
+ return InputExample(
427
+ tensor_dict["idx"].numpy(),
428
+ tensor_dict["question1"].numpy().decode("utf-8"),
429
+ tensor_dict["question2"].numpy().decode("utf-8"),
430
+ str(tensor_dict["label"].numpy()),
431
+ )
432
+
433
+ def get_train_examples(self, data_dir):
434
+ """See base class."""
435
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
436
+
437
+ def get_dev_examples(self, data_dir):
438
+ """See base class."""
439
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
440
+
441
+ def get_test_examples(self, data_dir):
442
+ """See base class."""
443
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
444
+
445
+ def get_labels(self):
446
+ """See base class."""
447
+ return ["0", "1"]
448
+
449
+ def _create_examples(self, lines, set_type):
450
+ """Creates examples for the training, dev and test sets."""
451
+ test_mode = set_type == "test"
452
+ q1_index = 1 if test_mode else 3
453
+ q2_index = 2 if test_mode else 4
454
+ examples = []
455
+ for i, line in enumerate(lines):
456
+ if i == 0:
457
+ continue
458
+ guid = f"{set_type}-{line[0]}"
459
+ try:
460
+ text_a = line[q1_index]
461
+ text_b = line[q2_index]
462
+ label = None if test_mode else line[5]
463
+ except IndexError:
464
+ continue
465
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
466
+ return examples
467
+
468
+
469
+ class QnliProcessor(DataProcessor):
470
+ """Processor for the QNLI data set (GLUE version)."""
471
+
472
+ def __init__(self, *args, **kwargs):
473
+ super().__init__(*args, **kwargs)
474
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
475
+
476
+ def get_example_from_tensor_dict(self, tensor_dict):
477
+ """See base class."""
478
+ return InputExample(
479
+ tensor_dict["idx"].numpy(),
480
+ tensor_dict["question"].numpy().decode("utf-8"),
481
+ tensor_dict["sentence"].numpy().decode("utf-8"),
482
+ str(tensor_dict["label"].numpy()),
483
+ )
484
+
485
+ def get_train_examples(self, data_dir):
486
+ """See base class."""
487
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
488
+
489
+ def get_dev_examples(self, data_dir):
490
+ """See base class."""
491
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
492
+
493
+ def get_test_examples(self, data_dir):
494
+ """See base class."""
495
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
496
+
497
+ def get_labels(self):
498
+ """See base class."""
499
+ return ["entailment", "not_entailment"]
500
+
501
+ def _create_examples(self, lines, set_type):
502
+ """Creates examples for the training, dev and test sets."""
503
+ examples = []
504
+ for i, line in enumerate(lines):
505
+ if i == 0:
506
+ continue
507
+ guid = f"{set_type}-{line[0]}"
508
+ text_a = line[1]
509
+ text_b = line[2]
510
+ label = None if set_type == "test" else line[-1]
511
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
512
+ return examples
513
+
514
+
515
+ class RteProcessor(DataProcessor):
516
+ """Processor for the RTE data set (GLUE version)."""
517
+
518
+ def __init__(self, *args, **kwargs):
519
+ super().__init__(*args, **kwargs)
520
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
521
+
522
+ def get_example_from_tensor_dict(self, tensor_dict):
523
+ """See base class."""
524
+ return InputExample(
525
+ tensor_dict["idx"].numpy(),
526
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
527
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
528
+ str(tensor_dict["label"].numpy()),
529
+ )
530
+
531
+ def get_train_examples(self, data_dir):
532
+ """See base class."""
533
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
534
+
535
+ def get_dev_examples(self, data_dir):
536
+ """See base class."""
537
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
538
+
539
+ def get_test_examples(self, data_dir):
540
+ """See base class."""
541
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
542
+
543
+ def get_labels(self):
544
+ """See base class."""
545
+ return ["entailment", "not_entailment"]
546
+
547
+ def _create_examples(self, lines, set_type):
548
+ """Creates examples for the training, dev and test sets."""
549
+ examples = []
550
+ for i, line in enumerate(lines):
551
+ if i == 0:
552
+ continue
553
+ guid = f"{set_type}-{line[0]}"
554
+ text_a = line[1]
555
+ text_b = line[2]
556
+ label = None if set_type == "test" else line[-1]
557
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
558
+ return examples
559
+
560
+
561
+ class WnliProcessor(DataProcessor):
562
+ """Processor for the WNLI data set (GLUE version)."""
563
+
564
+ def __init__(self, *args, **kwargs):
565
+ super().__init__(*args, **kwargs)
566
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
567
+
568
+ def get_example_from_tensor_dict(self, tensor_dict):
569
+ """See base class."""
570
+ return InputExample(
571
+ tensor_dict["idx"].numpy(),
572
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
573
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
574
+ str(tensor_dict["label"].numpy()),
575
+ )
576
+
577
+ def get_train_examples(self, data_dir):
578
+ """See base class."""
579
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
580
+
581
+ def get_dev_examples(self, data_dir):
582
+ """See base class."""
583
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
584
+
585
+ def get_test_examples(self, data_dir):
586
+ """See base class."""
587
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
588
+
589
+ def get_labels(self):
590
+ """See base class."""
591
+ return ["0", "1"]
592
+
593
+ def _create_examples(self, lines, set_type):
594
+ """Creates examples for the training, dev and test sets."""
595
+ examples = []
596
+ for i, line in enumerate(lines):
597
+ if i == 0:
598
+ continue
599
+ guid = f"{set_type}-{line[0]}"
600
+ text_a = line[1]
601
+ text_b = line[2]
602
+ label = None if set_type == "test" else line[-1]
603
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
604
+ return examples
605
+
606
+
607
+ glue_tasks_num_labels = {
608
+ "cola": 2,
609
+ "mnli": 3,
610
+ "mrpc": 2,
611
+ "sst-2": 2,
612
+ "sts-b": 1,
613
+ "qqp": 2,
614
+ "qnli": 2,
615
+ "rte": 2,
616
+ "wnli": 2,
617
+ }
618
+
619
+ glue_processors = {
620
+ "cola": ColaProcessor,
621
+ "mnli": MnliProcessor,
622
+ "mnli-mm": MnliMismatchedProcessor,
623
+ "mrpc": MrpcProcessor,
624
+ "sst-2": Sst2Processor,
625
+ "sts-b": StsbProcessor,
626
+ "qqp": QqpProcessor,
627
+ "qnli": QnliProcessor,
628
+ "rte": RteProcessor,
629
+ "wnli": WnliProcessor,
630
+ }
631
+
632
+ glue_output_modes = {
633
+ "cola": "classification",
634
+ "mnli": "classification",
635
+ "mnli-mm": "classification",
636
+ "mrpc": "classification",
637
+ "sst-2": "classification",
638
+ "sts-b": "regression",
639
+ "qqp": "classification",
640
+ "qnli": "classification",
641
+ "rte": "classification",
642
+ "wnli": "classification",
643
+ }
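A minimal sketch of the (deprecated) GLUE helpers defined above, assuming `data_dir` and `tokenizer` are provided by the caller; this is not part of the original diff.

```python
# Hypothetical example on MRPC; instantiating the processor emits a FutureWarning.
processor = glue_processors["mrpc"]()
examples = processor.get_dev_examples(data_dir)
features = glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=128,
    task="mrpc",  # label list and output mode are looked up from the task name
)
```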
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/squad.py ADDED
@@ -0,0 +1,845 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from functools import partial
18
+ from multiprocessing import Pool, cpu_count
19
+
20
+ import numpy as np
21
+ from tqdm import tqdm
22
+
23
+ from ...models.bert.tokenization_bert import whitespace_tokenize
24
+ from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
25
+ from ...utils import is_tf_available, is_torch_available, logging
26
+ from .utils import DataProcessor
27
+
28
+
29
+ # Store the tokenizers which insert 2 separator tokens
30
+ MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
31
+
32
+
33
+ if is_torch_available():
34
+ import torch
35
+ from torch.utils.data import TensorDataset
36
+
37
+ if is_tf_available():
38
+ import tensorflow as tf
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
44
+ """Returns tokenized answer spans that better match the annotated answer."""
45
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
46
+
47
+ for new_start in range(input_start, input_end + 1):
48
+ for new_end in range(input_end, new_start - 1, -1):
49
+ text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
50
+ if text_span == tok_answer_text:
51
+ return (new_start, new_end)
52
+
53
+ return (input_start, input_end)
54
+
55
+
56
+ def _check_is_max_context(doc_spans, cur_span_index, position):
57
+ """Check if this is the 'max context' doc span for the token."""
58
+ best_score = None
59
+ best_span_index = None
60
+ for span_index, doc_span in enumerate(doc_spans):
61
+ end = doc_span.start + doc_span.length - 1
62
+ if position < doc_span.start:
63
+ continue
64
+ if position > end:
65
+ continue
66
+ num_left_context = position - doc_span.start
67
+ num_right_context = end - position
68
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
69
+ if best_score is None or score > best_score:
70
+ best_score = score
71
+ best_span_index = span_index
72
+
73
+ return cur_span_index == best_span_index
74
+
75
+
76
+ def _new_check_is_max_context(doc_spans, cur_span_index, position):
77
+ """Check if this is the 'max context' doc span for the token."""
78
+ # if len(doc_spans) == 1:
79
+ # return True
80
+ best_score = None
81
+ best_span_index = None
82
+ for span_index, doc_span in enumerate(doc_spans):
83
+ end = doc_span["start"] + doc_span["length"] - 1
84
+ if position < doc_span["start"]:
85
+ continue
86
+ if position > end:
87
+ continue
88
+ num_left_context = position - doc_span["start"]
89
+ num_right_context = end - position
90
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
91
+ if best_score is None or score > best_score:
92
+ best_score = score
93
+ best_span_index = span_index
94
+
95
+ return cur_span_index == best_span_index
96
+
97
+
98
+ def _is_whitespace(c):
99
+ if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
100
+ return True
101
+ return False
102
+
103
+
104
+ def squad_convert_example_to_features(
105
+ example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
106
+ ):
107
+ features = []
108
+ if is_training and not example.is_impossible:
109
+ # Get start and end position
110
+ start_position = example.start_position
111
+ end_position = example.end_position
112
+
113
+ # If the answer cannot be found in the text, then skip this example.
114
+ actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
115
+ cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
116
+ if actual_text.find(cleaned_answer_text) == -1:
117
+ logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
118
+ return []
119
+
120
+ tok_to_orig_index = []
121
+ orig_to_tok_index = []
122
+ all_doc_tokens = []
123
+ for i, token in enumerate(example.doc_tokens):
124
+ orig_to_tok_index.append(len(all_doc_tokens))
125
+ if tokenizer.__class__.__name__ in [
126
+ "RobertaTokenizer",
127
+ "LongformerTokenizer",
128
+ "BartTokenizer",
129
+ "RobertaTokenizerFast",
130
+ "LongformerTokenizerFast",
131
+ "BartTokenizerFast",
132
+ ]:
133
+ sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
134
+ else:
135
+ sub_tokens = tokenizer.tokenize(token)
136
+ for sub_token in sub_tokens:
137
+ tok_to_orig_index.append(i)
138
+ all_doc_tokens.append(sub_token)
139
+
140
+ if is_training and not example.is_impossible:
141
+ tok_start_position = orig_to_tok_index[example.start_position]
142
+ if example.end_position < len(example.doc_tokens) - 1:
143
+ tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
144
+ else:
145
+ tok_end_position = len(all_doc_tokens) - 1
146
+
147
+ (tok_start_position, tok_end_position) = _improve_answer_span(
148
+ all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
149
+ )
150
+
151
+ spans = []
152
+
153
+ truncated_query = tokenizer.encode(
154
+ example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
155
+ )
156
+
157
+ # Tokenizers that insert 2 SEP tokens between <context> & <question> need to have special handling
158
+ # in the way they compute mask of added tokens.
159
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
160
+ sequence_added_tokens = (
161
+ tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
162
+ if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
163
+ else tokenizer.model_max_length - tokenizer.max_len_single_sentence
164
+ )
165
+ sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
166
+
167
+ span_doc_tokens = all_doc_tokens
168
+ while len(spans) * doc_stride < len(all_doc_tokens):
169
+ # Define the side we want to truncate / pad and the text/pair sorting
170
+ if tokenizer.padding_side == "right":
171
+ texts = truncated_query
172
+ pairs = span_doc_tokens
173
+ truncation = TruncationStrategy.ONLY_SECOND.value
174
+ else:
175
+ texts = span_doc_tokens
176
+ pairs = truncated_query
177
+ truncation = TruncationStrategy.ONLY_FIRST.value
178
+
179
+ encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic
180
+ texts,
181
+ pairs,
182
+ truncation=truncation,
183
+ padding=padding_strategy,
184
+ max_length=max_seq_length,
185
+ return_overflowing_tokens=True,
186
+ stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
187
+ return_token_type_ids=True,
188
+ )
189
+
190
+ paragraph_len = min(
191
+ len(all_doc_tokens) - len(spans) * doc_stride,
192
+ max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
193
+ )
194
+
195
+ if tokenizer.pad_token_id in encoded_dict["input_ids"]:
196
+ if tokenizer.padding_side == "right":
197
+ non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
198
+ else:
199
+ last_padding_id_position = (
200
+ len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
201
+ )
202
+ non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
203
+
204
+ else:
205
+ non_padded_ids = encoded_dict["input_ids"]
206
+
207
+ tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
208
+
209
+ token_to_orig_map = {}
210
+ for i in range(paragraph_len):
211
+ index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
212
+ token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
213
+
214
+ encoded_dict["paragraph_len"] = paragraph_len
215
+ encoded_dict["tokens"] = tokens
216
+ encoded_dict["token_to_orig_map"] = token_to_orig_map
217
+ encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
218
+ encoded_dict["token_is_max_context"] = {}
219
+ encoded_dict["start"] = len(spans) * doc_stride
220
+ encoded_dict["length"] = paragraph_len
221
+
222
+ spans.append(encoded_dict)
223
+
224
+ if "overflowing_tokens" not in encoded_dict or (
225
+ "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
226
+ ):
227
+ break
228
+ span_doc_tokens = encoded_dict["overflowing_tokens"]
229
+
230
+ for doc_span_index in range(len(spans)):
231
+ for j in range(spans[doc_span_index]["paragraph_len"]):
232
+ is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
233
+ index = (
234
+ j
235
+ if tokenizer.padding_side == "left"
236
+ else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
237
+ )
238
+ spans[doc_span_index]["token_is_max_context"][index] = is_max_context
239
+
240
+ for span in spans:
241
+ # Identify the position of the CLS token
242
+ cls_index = span["input_ids"].index(tokenizer.cls_token_id)
243
+
244
+ # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
245
+ # Original TF implementation also keep the classification token (set to 0)
246
+ p_mask = np.ones_like(span["token_type_ids"])
247
+ if tokenizer.padding_side == "right":
248
+ p_mask[len(truncated_query) + sequence_added_tokens :] = 0
249
+ else:
250
+ p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0
251
+
252
+ pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
253
+ special_token_indices = np.asarray(
254
+ tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
255
+ ).nonzero()
256
+
257
+ p_mask[pad_token_indices] = 1
258
+ p_mask[special_token_indices] = 1
259
+
260
+ # Set the cls index to 0: the CLS index can be used for impossible answers
261
+ p_mask[cls_index] = 0
262
+
263
+ span_is_impossible = example.is_impossible
264
+ start_position = 0
265
+ end_position = 0
266
+ if is_training and not span_is_impossible:
267
+ # For training, if our document chunk does not contain an annotation
268
+ # we throw it out, since there is nothing to predict.
269
+ doc_start = span["start"]
270
+ doc_end = span["start"] + span["length"] - 1
271
+ out_of_span = False
272
+
273
+ if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
274
+ out_of_span = True
275
+
276
+ if out_of_span:
277
+ start_position = cls_index
278
+ end_position = cls_index
279
+ span_is_impossible = True
280
+ else:
281
+ if tokenizer.padding_side == "left":
282
+ doc_offset = 0
283
+ else:
284
+ doc_offset = len(truncated_query) + sequence_added_tokens
285
+
286
+ start_position = tok_start_position - doc_start + doc_offset
287
+ end_position = tok_end_position - doc_start + doc_offset
288
+
289
+ features.append(
290
+ SquadFeatures(
291
+ span["input_ids"],
292
+ span["attention_mask"],
293
+ span["token_type_ids"],
294
+ cls_index,
295
+ p_mask.tolist(),
296
+ example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing.
297
+ unique_id=0,
298
+ paragraph_len=span["paragraph_len"],
299
+ token_is_max_context=span["token_is_max_context"],
300
+ tokens=span["tokens"],
301
+ token_to_orig_map=span["token_to_orig_map"],
302
+ start_position=start_position,
303
+ end_position=end_position,
304
+ is_impossible=span_is_impossible,
305
+ qas_id=example.qas_id,
306
+ )
307
+ )
308
+ return features
309
+
310
+
311
+ def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
312
+ global tokenizer
313
+ tokenizer = tokenizer_for_convert
314
+
315
+
316
+ def squad_convert_examples_to_features(
317
+ examples,
318
+ tokenizer,
319
+ max_seq_length,
320
+ doc_stride,
321
+ max_query_length,
322
+ is_training,
323
+ padding_strategy="max_length",
324
+ return_dataset=False,
325
+ threads=1,
326
+ tqdm_enabled=True,
327
+ ):
328
+ """
329
+ Converts a list of examples into a list of features that can be directly given as input to a model. It is
330
+ model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
331
+
332
+ Args:
333
+ examples: list of [`~data.processors.squad.SquadExample`]
334
+ tokenizer: an instance of a child of [`PreTrainedTokenizer`]
335
+ max_seq_length: The maximum sequence length of the inputs.
336
+ doc_stride: The stride used when the context is too large and is split across several features.
337
+ max_query_length: The maximum length of the query.
338
+ is_training: whether to create features for model evaluation or model training.
339
+ padding_strategy: Default to "max_length". Which padding strategy to use
340
+ return_dataset: Default False. Either 'pt' or 'tf'.
341
+ if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset
342
+ threads: multiple processing threads.
343
+
344
+
345
+ Returns:
346
+ list of [`~data.processors.squad.SquadFeatures`]
347
+
348
+ Example:
349
+
350
+ ```python
351
+ processor = SquadV2Processor()
352
+ examples = processor.get_dev_examples(data_dir)
353
+
354
+ features = squad_convert_examples_to_features(
355
+ examples=examples,
356
+ tokenizer=tokenizer,
357
+ max_seq_length=args.max_seq_length,
358
+ doc_stride=args.doc_stride,
359
+ max_query_length=args.max_query_length,
360
+ is_training=not evaluate,
361
+ )
362
+ ```"""
363
+ # Defining helper methods
364
+ features = []
365
+
366
+ threads = min(threads, cpu_count())
367
+ with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
368
+ annotate_ = partial(
369
+ squad_convert_example_to_features,
370
+ max_seq_length=max_seq_length,
371
+ doc_stride=doc_stride,
372
+ max_query_length=max_query_length,
373
+ padding_strategy=padding_strategy,
374
+ is_training=is_training,
375
+ )
376
+ features = list(
377
+ tqdm(
378
+ p.imap(annotate_, examples, chunksize=32),
379
+ total=len(examples),
380
+ desc="convert squad examples to features",
381
+ disable=not tqdm_enabled,
382
+ )
383
+ )
384
+
385
+ new_features = []
386
+ unique_id = 1000000000
387
+ example_index = 0
388
+ for example_features in tqdm(
389
+ features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
390
+ ):
391
+ if not example_features:
392
+ continue
393
+ for example_feature in example_features:
394
+ example_feature.example_index = example_index
395
+ example_feature.unique_id = unique_id
396
+ new_features.append(example_feature)
397
+ unique_id += 1
398
+ example_index += 1
399
+ features = new_features
400
+ del new_features
401
+ if return_dataset == "pt":
402
+ if not is_torch_available():
403
+ raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
404
+
405
+ # Convert to Tensors and build dataset
406
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
407
+ all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
408
+ all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
409
+ all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
410
+ all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
411
+ all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
412
+
413
+ if not is_training:
414
+ all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
415
+ dataset = TensorDataset(
416
+ all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
417
+ )
418
+ else:
419
+ all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
420
+ all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
421
+ dataset = TensorDataset(
422
+ all_input_ids,
423
+ all_attention_masks,
424
+ all_token_type_ids,
425
+ all_start_positions,
426
+ all_end_positions,
427
+ all_cls_index,
428
+ all_p_mask,
429
+ all_is_impossible,
430
+ )
431
+
432
+ return features, dataset
433
+ elif return_dataset == "tf":
434
+ if not is_tf_available():
435
+ raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
436
+
437
+ def gen():
438
+ for i, ex in enumerate(features):
439
+ if ex.token_type_ids is None:
440
+ yield (
441
+ {
442
+ "input_ids": ex.input_ids,
443
+ "attention_mask": ex.attention_mask,
444
+ "feature_index": i,
445
+ "qas_id": ex.qas_id,
446
+ },
447
+ {
448
+ "start_positions": ex.start_position,
449
+ "end_positions": ex.end_position,
450
+ "cls_index": ex.cls_index,
451
+ "p_mask": ex.p_mask,
452
+ "is_impossible": ex.is_impossible,
453
+ },
454
+ )
455
+ else:
456
+ yield (
457
+ {
458
+ "input_ids": ex.input_ids,
459
+ "attention_mask": ex.attention_mask,
460
+ "token_type_ids": ex.token_type_ids,
461
+ "feature_index": i,
462
+ "qas_id": ex.qas_id,
463
+ },
464
+ {
465
+ "start_positions": ex.start_position,
466
+ "end_positions": ex.end_position,
467
+ "cls_index": ex.cls_index,
468
+ "p_mask": ex.p_mask,
469
+ "is_impossible": ex.is_impossible,
470
+ },
471
+ )
472
+
473
+ # Why have we split the batch into a tuple? PyTorch just has a list of tensors.
474
+ if "token_type_ids" in tokenizer.model_input_names:
475
+ train_types = (
476
+ {
477
+ "input_ids": tf.int32,
478
+ "attention_mask": tf.int32,
479
+ "token_type_ids": tf.int32,
480
+ "feature_index": tf.int64,
481
+ "qas_id": tf.string,
482
+ },
483
+ {
484
+ "start_positions": tf.int64,
485
+ "end_positions": tf.int64,
486
+ "cls_index": tf.int64,
487
+ "p_mask": tf.int32,
488
+ "is_impossible": tf.int32,
489
+ },
490
+ )
491
+
492
+ train_shapes = (
493
+ {
494
+ "input_ids": tf.TensorShape([None]),
495
+ "attention_mask": tf.TensorShape([None]),
496
+ "token_type_ids": tf.TensorShape([None]),
497
+ "feature_index": tf.TensorShape([]),
498
+ "qas_id": tf.TensorShape([]),
499
+ },
500
+ {
501
+ "start_positions": tf.TensorShape([]),
502
+ "end_positions": tf.TensorShape([]),
503
+ "cls_index": tf.TensorShape([]),
504
+ "p_mask": tf.TensorShape([None]),
505
+ "is_impossible": tf.TensorShape([]),
506
+ },
507
+ )
508
+ else:
509
+ train_types = (
510
+ {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
511
+ {
512
+ "start_positions": tf.int64,
513
+ "end_positions": tf.int64,
514
+ "cls_index": tf.int64,
515
+ "p_mask": tf.int32,
516
+ "is_impossible": tf.int32,
517
+ },
518
+ )
519
+
520
+ train_shapes = (
521
+ {
522
+ "input_ids": tf.TensorShape([None]),
523
+ "attention_mask": tf.TensorShape([None]),
524
+ "feature_index": tf.TensorShape([]),
525
+ "qas_id": tf.TensorShape([]),
526
+ },
527
+ {
528
+ "start_positions": tf.TensorShape([]),
529
+ "end_positions": tf.TensorShape([]),
530
+ "cls_index": tf.TensorShape([]),
531
+ "p_mask": tf.TensorShape([None]),
532
+ "is_impossible": tf.TensorShape([]),
533
+ },
534
+ )
535
+
536
+ return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
537
+ else:
538
+ return features
539
+
540
+
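Complementing the docstring example earlier in this file, a sketch of the PyTorch path (`return_dataset="pt"`), with `data_dir` and `tokenizer` as placeholders; this is not part of the original diff.

```python
# Hypothetical sketch: build a TensorDataset for evaluation (requires torch).
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",
    threads=4,
)
```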
541
+ class SquadProcessor(DataProcessor):
542
+ """
543
+ Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and
544
+ version 2.0 of SQuAD, respectively.
545
+ """
546
+
547
+ train_file = None
548
+ dev_file = None
549
+
550
+ def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
551
+ if not evaluate:
552
+ answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
553
+ answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
554
+ answers = []
555
+ else:
556
+ answers = [
557
+ {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
558
+ for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
559
+ ]
560
+
561
+ answer = None
562
+ answer_start = None
563
+
564
+ return SquadExample(
565
+ qas_id=tensor_dict["id"].numpy().decode("utf-8"),
566
+ question_text=tensor_dict["question"].numpy().decode("utf-8"),
567
+ context_text=tensor_dict["context"].numpy().decode("utf-8"),
568
+ answer_text=answer,
569
+ start_position_character=answer_start,
570
+ title=tensor_dict["title"].numpy().decode("utf-8"),
571
+ answers=answers,
572
+ )
573
+
574
+ def get_examples_from_dataset(self, dataset, evaluate=False):
575
+ """
576
+ Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
577
+
578
+ Args:
579
+ dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
580
+ evaluate: Boolean specifying if in evaluation mode or in training mode
581
+
582
+ Returns:
583
+ List of SquadExample
584
+
585
+ Examples:
586
+
587
+ ```python
588
+ >>> import tensorflow_datasets as tfds
589
+
590
+ >>> dataset = tfds.load("squad")
591
+
592
+ >>> training_examples = SquadV1Processor().get_examples_from_dataset(dataset, evaluate=False)
593
+ >>> evaluation_examples = SquadV1Processor().get_examples_from_dataset(dataset, evaluate=True)
594
+ ```"""
595
+
596
+ if evaluate:
597
+ dataset = dataset["validation"]
598
+ else:
599
+ dataset = dataset["train"]
600
+
601
+ examples = []
602
+ for tensor_dict in tqdm(dataset):
603
+ examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
604
+
605
+ return examples
606
+
607
+ def get_train_examples(self, data_dir, filename=None):
608
+ """
609
+ Returns the training examples from the data directory.
610
+
611
+ Args:
612
+ data_dir: Directory containing the data files used for training and evaluating.
613
+ filename: None by default, specify this if the training file has a different name than the original one
614
+ which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
615
+
616
+ """
617
+ if data_dir is None:
618
+ data_dir = ""
619
+
620
+ if self.train_file is None:
621
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
622
+
623
+ with open(
624
+ os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
625
+ ) as reader:
626
+ input_data = json.load(reader)["data"]
627
+ return self._create_examples(input_data, "train")
628
+
629
+ def get_dev_examples(self, data_dir, filename=None):
630
+ """
631
+ Returns the evaluation example from the data directory.
632
+
633
+ Args:
634
+ data_dir: Directory containing the data files used for training and evaluating.
635
+ filename: None by default, specify this if the evaluation file has a different name than the original one
636
+ which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
637
+ """
638
+ if data_dir is None:
639
+ data_dir = ""
640
+
641
+ if self.dev_file is None:
642
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
643
+
644
+ with open(
645
+ os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
646
+ ) as reader:
647
+ input_data = json.load(reader)["data"]
648
+ return self._create_examples(input_data, "dev")
649
+
650
+ def _create_examples(self, input_data, set_type):
651
+ is_training = set_type == "train"
652
+ examples = []
653
+ for entry in tqdm(input_data):
654
+ title = entry["title"]
655
+ for paragraph in entry["paragraphs"]:
656
+ context_text = paragraph["context"]
657
+ for qa in paragraph["qas"]:
658
+ qas_id = qa["id"]
659
+ question_text = qa["question"]
660
+ start_position_character = None
661
+ answer_text = None
662
+ answers = []
663
+
664
+ is_impossible = qa.get("is_impossible", False)
665
+ if not is_impossible:
666
+ if is_training:
667
+ answer = qa["answers"][0]
668
+ answer_text = answer["text"]
669
+ start_position_character = answer["answer_start"]
670
+ else:
671
+ answers = qa["answers"]
672
+
673
+ example = SquadExample(
674
+ qas_id=qas_id,
675
+ question_text=question_text,
676
+ context_text=context_text,
677
+ answer_text=answer_text,
678
+ start_position_character=start_position_character,
679
+ title=title,
680
+ is_impossible=is_impossible,
681
+ answers=answers,
682
+ )
683
+ examples.append(example)
684
+ return examples
685
+
686
+
687
+ class SquadV1Processor(SquadProcessor):
688
+ train_file = "train-v1.1.json"
689
+ dev_file = "dev-v1.1.json"
690
+
691
+
692
+ class SquadV2Processor(SquadProcessor):
693
+ train_file = "train-v2.0.json"
694
+ dev_file = "dev-v2.0.json"
695
+
696
+
697
+ class SquadExample:
698
+ """
699
+ A single training/test example for the Squad dataset, as loaded from disk.
700
+
701
+ Args:
702
+ qas_id: The example's unique identifier
703
+ question_text: The question string
704
+ context_text: The context string
705
+ answer_text: The answer string
706
+ start_position_character: The character position of the start of the answer
707
+ title: The title of the example
708
+ answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
709
+ is_impossible: False by default, set to True if the example has no possible answer.
710
+ """
711
+
712
+ def __init__(
713
+ self,
714
+ qas_id,
715
+ question_text,
716
+ context_text,
717
+ answer_text,
718
+ start_position_character,
719
+ title,
720
+ answers=[],
721
+ is_impossible=False,
722
+ ):
723
+ self.qas_id = qas_id
724
+ self.question_text = question_text
725
+ self.context_text = context_text
726
+ self.answer_text = answer_text
727
+ self.title = title
728
+ self.is_impossible = is_impossible
729
+ self.answers = answers
730
+
731
+ self.start_position, self.end_position = 0, 0
732
+
733
+ doc_tokens = []
734
+ char_to_word_offset = []
735
+ prev_is_whitespace = True
736
+
737
+ # Split on whitespace so that different tokens may be attributed to their original position.
738
+ for c in self.context_text:
739
+ if _is_whitespace(c):
740
+ prev_is_whitespace = True
741
+ else:
742
+ if prev_is_whitespace:
743
+ doc_tokens.append(c)
744
+ else:
745
+ doc_tokens[-1] += c
746
+ prev_is_whitespace = False
747
+ char_to_word_offset.append(len(doc_tokens) - 1)
748
+
749
+ self.doc_tokens = doc_tokens
750
+ self.char_to_word_offset = char_to_word_offset
751
+
752
+ # Start and end positions only have a value during training (when an answer and its start character are provided).
753
+ if start_position_character is not None and not is_impossible:
754
+ self.start_position = char_to_word_offset[start_position_character]
755
+ self.end_position = char_to_word_offset[
756
+ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
757
+ ]
758
+
759
+
760
+ class SquadFeatures:
761
+ """
762
+ Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
763
+ [`~data.processors.squad.SquadExample`] using the
764
+ [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
765
+
766
+ Args:
767
+ input_ids: Indices of input sequence tokens in the vocabulary.
768
+ attention_mask: Mask to avoid performing attention on padding token indices.
769
+ token_type_ids: Segment token indices to indicate first and second portions of the inputs.
770
+ cls_index: the index of the CLS token.
771
+ p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
772
+ Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer.
773
+ example_index: the index of the example
774
+ unique_id: The unique Feature identifier
775
+ paragraph_len: The length of the context
776
+ token_is_max_context:
777
+ List of booleans identifying which tokens have their maximum context in this feature object. If a token
778
+ does not have its maximum context in this feature object, it means that another feature object has more
779
+ information related to that token and should be prioritized over this feature for that token.
780
+ tokens: list of tokens corresponding to the input ids
781
+ token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
782
+ start_position: start of the answer token index
783
+ end_position: end of the answer token index
784
+ encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
785
+ """
786
+
787
+ def __init__(
788
+ self,
789
+ input_ids,
790
+ attention_mask,
791
+ token_type_ids,
792
+ cls_index,
793
+ p_mask,
794
+ example_index,
795
+ unique_id,
796
+ paragraph_len,
797
+ token_is_max_context,
798
+ tokens,
799
+ token_to_orig_map,
800
+ start_position,
801
+ end_position,
802
+ is_impossible,
803
+ qas_id: str = None,
804
+ encoding: BatchEncoding = None,
805
+ ):
806
+ self.input_ids = input_ids
807
+ self.attention_mask = attention_mask
808
+ self.token_type_ids = token_type_ids
809
+ self.cls_index = cls_index
810
+ self.p_mask = p_mask
811
+
812
+ self.example_index = example_index
813
+ self.unique_id = unique_id
814
+ self.paragraph_len = paragraph_len
815
+ self.token_is_max_context = token_is_max_context
816
+ self.tokens = tokens
817
+ self.token_to_orig_map = token_to_orig_map
818
+
819
+ self.start_position = start_position
820
+ self.end_position = end_position
821
+ self.is_impossible = is_impossible
822
+ self.qas_id = qas_id
823
+
824
+ self.encoding = encoding
825
+
826
+
827
+ class SquadResult:
828
+ """
829
+ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
830
+
831
+ Args:
832
+ unique_id: The unique identifier corresponding to that example.
833
+ start_logits: The logits corresponding to the start of the answer
834
+ end_logits: The logits corresponding to the end of the answer
835
+ """
836
+
837
+ def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
838
+ self.start_logits = start_logits
839
+ self.end_logits = end_logits
840
+ self.unique_id = unique_id
841
+
842
+ if start_top_index:
843
+ self.start_top_index = start_top_index
844
+ self.end_top_index = end_top_index
845
+ self.cls_logits = cls_logits
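A minimal end-to-end sketch of the classes added in this file (the data directory and tokenizer checkpoint are assumptions for illustration): `SquadV2Processor` turns the raw JSON into `SquadExample` objects, which `squad_convert_examples_to_features` converts into `SquadFeatures` plus a PyTorch `TensorDataset`.

```python
# Sketch only: ./squad_data is a hypothetical directory containing dev-v2.0.json.
from transformers import AutoTokenizer
from transformers.data.processors.squad import SquadV2Processor, squad_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = SquadV2Processor()
examples = processor.get_dev_examples("./squad_data")  # list of SquadExample

features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",  # additionally returns a torch.utils.data.TensorDataset
)
print(len(examples), len(features), len(dataset))
```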
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/utils.py ADDED
@@ -0,0 +1,349 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import csv
18
+ import dataclasses
19
+ import json
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Union
22
+
23
+ from ...utils import is_tf_available, is_torch_available, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ @dataclass
30
+ class InputExample:
31
+ """
32
+ A single training/test example for simple sequence classification.
33
+
34
+ Args:
35
+ guid: Unique id for the example.
36
+ text_a: string. The untokenized text of the first sequence. For single
37
+ sequence tasks, only this sequence must be specified.
38
+ text_b: (Optional) string. The untokenized text of the second sequence.
39
+ Only must be specified for sequence pair tasks.
40
+ label: (Optional) string. The label of the example. This should be
41
+ specified for train and dev examples, but not for test examples.
42
+ """
43
+
44
+ guid: str
45
+ text_a: str
46
+ text_b: Optional[str] = None
47
+ label: Optional[str] = None
48
+
49
+ def to_json_string(self):
50
+ """Serializes this instance to a JSON string."""
51
+ return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
52
+
53
+
54
+ @dataclass(frozen=True)
55
+ class InputFeatures:
56
+ """
57
+ A single set of features of data. Property names are the same names as the corresponding inputs to a model.
58
+
59
+ Args:
60
+ input_ids: Indices of input sequence tokens in the vocabulary.
61
+ attention_mask: Mask to avoid performing attention on padding token indices.
62
+ Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
63
+ tokens.
64
+ token_type_ids: (Optional) Segment token indices to indicate first and second
65
+ portions of the inputs. Only some models use them.
66
+ label: (Optional) Label corresponding to the input. Int for classification problems,
67
+ float for regression problems.
68
+ """
69
+
70
+ input_ids: List[int]
71
+ attention_mask: Optional[List[int]] = None
72
+ token_type_ids: Optional[List[int]] = None
73
+ label: Optional[Union[int, float]] = None
74
+
75
+ def to_json_string(self):
76
+ """Serializes this instance to a JSON string."""
77
+ return json.dumps(dataclasses.asdict(self)) + "\n"
78
+
79
+
80
+ class DataProcessor:
81
+ """Base class for data converters for sequence classification data sets."""
82
+
83
+ def get_example_from_tensor_dict(self, tensor_dict):
84
+ """
85
+ Gets an example from a dict with tensorflow tensors.
86
+
87
+ Args:
88
+ tensor_dict: Keys and values should match the corresponding Glue
89
+ tensorflow_dataset examples.
90
+ """
91
+ raise NotImplementedError()
92
+
93
+ def get_train_examples(self, data_dir):
94
+ """Gets a collection of [`InputExample`] for the train set."""
95
+ raise NotImplementedError()
96
+
97
+ def get_dev_examples(self, data_dir):
98
+ """Gets a collection of [`InputExample`] for the dev set."""
99
+ raise NotImplementedError()
100
+
101
+ def get_test_examples(self, data_dir):
102
+ """Gets a collection of [`InputExample`] for the test set."""
103
+ raise NotImplementedError()
104
+
105
+ def get_labels(self):
106
+ """Gets the list of labels for this data set."""
107
+ raise NotImplementedError()
108
+
109
+ def tfds_map(self, example):
110
+ """
111
+ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
112
+ examples to the correct format.
113
+ """
114
+ if len(self.get_labels()) > 1:
115
+ example.label = self.get_labels()[int(example.label)]
116
+ return example
117
+
118
+ @classmethod
119
+ def _read_tsv(cls, input_file, quotechar=None):
120
+ """Reads a tab separated value file."""
121
+ with open(input_file, "r", encoding="utf-8-sig") as f:
122
+ return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
123
+
124
+
125
+ class SingleSentenceClassificationProcessor(DataProcessor):
126
+ """Generic processor for a single sentence classification data set."""
127
+
128
+ def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
129
+ self.labels = [] if labels is None else labels
130
+ self.examples = [] if examples is None else examples
131
+ self.mode = mode
132
+ self.verbose = verbose
133
+
134
+ def __len__(self):
135
+ return len(self.examples)
136
+
137
+ def __getitem__(self, idx):
138
+ if isinstance(idx, slice):
139
+ return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
140
+ return self.examples[idx]
141
+
142
+ @classmethod
143
+ def create_from_csv(
144
+ cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
145
+ ):
146
+ processor = cls(**kwargs)
147
+ processor.add_examples_from_csv(
148
+ file_name,
149
+ split_name=split_name,
150
+ column_label=column_label,
151
+ column_text=column_text,
152
+ column_id=column_id,
153
+ skip_first_row=skip_first_row,
154
+ overwrite_labels=True,
155
+ overwrite_examples=True,
156
+ )
157
+ return processor
158
+
159
+ @classmethod
160
+ def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
161
+ processor = cls(**kwargs)
162
+ processor.add_examples(texts_or_text_and_labels, labels=labels)
163
+ return processor
164
+
165
+ def add_examples_from_csv(
166
+ self,
167
+ file_name,
168
+ split_name="",
169
+ column_label=0,
170
+ column_text=1,
171
+ column_id=None,
172
+ skip_first_row=False,
173
+ overwrite_labels=False,
174
+ overwrite_examples=False,
175
+ ):
176
+ lines = self._read_tsv(file_name)
177
+ if skip_first_row:
178
+ lines = lines[1:]
179
+ texts = []
180
+ labels = []
181
+ ids = []
182
+ for i, line in enumerate(lines):
183
+ texts.append(line[column_text])
184
+ labels.append(line[column_label])
185
+ if column_id is not None:
186
+ ids.append(line[column_id])
187
+ else:
188
+ guid = f"{split_name}-{i}" if split_name else str(i)
189
+ ids.append(guid)
190
+
191
+ return self.add_examples(
192
+ texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
193
+ )
194
+
195
+ def add_examples(
196
+ self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
197
+ ):
198
+ if labels is not None and len(texts_or_text_and_labels) != len(labels):
199
+ raise ValueError(
200
+ f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
201
+ )
202
+ if ids is not None and len(texts_or_text_and_labels) != len(ids):
203
+ raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
204
+ if ids is None:
205
+ ids = [None] * len(texts_or_text_and_labels)
206
+ if labels is None:
207
+ labels = [None] * len(texts_or_text_and_labels)
208
+ examples = []
209
+ added_labels = set()
210
+ for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
211
+ if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
212
+ text, label = text_or_text_and_label
213
+ else:
214
+ text = text_or_text_and_label
215
+ added_labels.add(label)
216
+ examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
217
+
218
+ # Update examples
219
+ if overwrite_examples:
220
+ self.examples = examples
221
+ else:
222
+ self.examples.extend(examples)
223
+
224
+ # Update labels
225
+ if overwrite_labels:
226
+ self.labels = list(added_labels)
227
+ else:
228
+ self.labels = list(set(self.labels).union(added_labels))
229
+
230
+ return self.examples
231
+
232
+ def get_features(
233
+ self,
234
+ tokenizer,
235
+ max_length=None,
236
+ pad_on_left=False,
237
+ pad_token=0,
238
+ mask_padding_with_zero=True,
239
+ return_tensors=None,
240
+ ):
241
+ """
242
+ Convert examples into a list of `InputFeatures`
243
+
244
+ Args:
245
+ tokenizer: Instance of a tokenizer that will tokenize the examples
246
+ max_length: Maximum example length
247
+ pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
248
+ pad_token: Padding token
249
+ mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
250
+ and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
251
+ values)
252
+
253
+ Returns:
254
+ A list of task-specific `InputFeatures` by default. If `return_tensors` is `"tf"`, a `tf.data.Dataset` of
256
+ task-specific features is returned instead; if `return_tensors` is `"pt"`, a PyTorch `TensorDataset`
257
+ is returned.
257
+
258
+ """
259
+ if max_length is None:
260
+ max_length = tokenizer.max_len
261
+
262
+ label_map = {label: i for i, label in enumerate(self.labels)}
263
+
264
+ all_input_ids = []
265
+ for ex_index, example in enumerate(self.examples):
266
+ if ex_index % 10000 == 0:
267
+ logger.info(f"Tokenizing example {ex_index}")
268
+
269
+ input_ids = tokenizer.encode(
270
+ example.text_a,
271
+ add_special_tokens=True,
272
+ max_length=min(max_length, tokenizer.max_len),
273
+ )
274
+ all_input_ids.append(input_ids)
275
+
276
+ batch_length = max(len(input_ids) for input_ids in all_input_ids)
277
+
278
+ features = []
279
+ for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
280
+ if ex_index % 10000 == 0:
281
+ logger.info(f"Writing example {ex_index}/{len(self.examples)}")
282
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
283
+ # tokens are attended to.
284
+ attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
285
+
286
+ # Zero-pad up to the sequence length.
287
+ padding_length = batch_length - len(input_ids)
288
+ if pad_on_left:
289
+ input_ids = ([pad_token] * padding_length) + input_ids
290
+ attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
291
+ else:
292
+ input_ids = input_ids + ([pad_token] * padding_length)
293
+ attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
294
+
295
+ if len(input_ids) != batch_length:
296
+ raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
297
+ if len(attention_mask) != batch_length:
298
+ raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
299
+
300
+ if self.mode == "classification":
301
+ label = label_map[example.label]
302
+ elif self.mode == "regression":
303
+ label = float(example.label)
304
+ else:
305
+ raise ValueError(self.mode)
306
+
307
+ if ex_index < 5 and self.verbose:
308
+ logger.info("*** Example ***")
309
+ logger.info(f"guid: {example.guid}")
310
+ logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
311
+ logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
312
+ logger.info(f"label: {example.label} (id = {label})")
313
+
314
+ features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
315
+
316
+ if return_tensors is None:
317
+ return features
318
+ elif return_tensors == "tf":
319
+ if not is_tf_available():
320
+ raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
321
+ import tensorflow as tf
322
+
323
+ def gen():
324
+ for ex in features:
325
+ yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
326
+
327
+ dataset = tf.data.Dataset.from_generator(
328
+ gen,
329
+ ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
330
+ ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
331
+ )
332
+ return dataset
333
+ elif return_tensors == "pt":
334
+ if not is_torch_available():
335
+ raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
336
+ import torch
337
+ from torch.utils.data import TensorDataset
338
+
339
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
340
+ all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
341
+ if self.mode == "classification":
342
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
343
+ elif self.mode == "regression":
344
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
345
+
346
+ dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
347
+ return dataset
348
+ else:
349
+ raise ValueError("return_tensors should be one of 'tf' or 'pt'")
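A small sketch of the generic processor defined above, built from in-memory `(text, label)` pairs instead of a CSV file; the example data is made up purely for illustration.

```python
from transformers.data.processors.utils import SingleSentenceClassificationProcessor

# Labels are collected automatically from the (text, label) pairs.
processor = SingleSentenceClassificationProcessor.create_from_examples(
    [("a great movie", "pos"), ("a dull movie", "neg"), ("not bad at all", "pos")]
)

print(len(processor))                 # 3 examples
print(sorted(processor.labels))       # ['neg', 'pos']
print(processor[0].to_json_string())  # serialized InputExample
```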
evalkit_internvl/lib/python3.10/site-packages/transformers/data/processors/xnli.py ADDED
@@ -0,0 +1,97 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ XNLI utils (dataset loading and evaluation)"""
17
+
18
+
19
+ import os
20
+
21
+ from ...utils import logging
22
+ from .utils import DataProcessor, InputExample
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class XnliProcessor(DataProcessor):
29
+ """
30
+ Processor for the XNLI dataset. Adapted from
31
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
32
+ """
33
+
34
+ def __init__(self, language, train_language=None):
35
+ self.language = language
36
+ self.train_language = train_language
37
+
38
+ def get_train_examples(self, data_dir):
39
+ """See base class."""
40
+ lg = self.language if self.train_language is None else self.train_language
41
+ lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
42
+ examples = []
43
+ for i, line in enumerate(lines):
44
+ if i == 0:
45
+ continue
46
+ guid = f"train-{i}"
47
+ text_a = line[0]
48
+ text_b = line[1]
49
+ label = "contradiction" if line[2] == "contradictory" else line[2]
50
+ if not isinstance(text_a, str):
51
+ raise ValueError(f"Training input {text_a} is not a string")
52
+ if not isinstance(text_b, str):
53
+ raise ValueError(f"Training input {text_b} is not a string")
54
+ if not isinstance(label, str):
55
+ raise ValueError(f"Training label {label} is not a string")
56
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
57
+ return examples
58
+
59
+ def get_test_examples(self, data_dir):
60
+ """See base class."""
61
+ lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
62
+ examples = []
63
+ for i, line in enumerate(lines):
64
+ if i == 0:
65
+ continue
66
+ language = line[0]
67
+ if language != self.language:
68
+ continue
69
+ guid = f"test-{i}"
70
+ text_a = line[6]
71
+ text_b = line[7]
72
+ label = line[1]
73
+ if not isinstance(text_a, str):
74
+ raise ValueError(f"Training input {text_a} is not a string")
75
+ if not isinstance(text_b, str):
76
+ raise ValueError(f"Training input {text_b} is not a string")
77
+ if not isinstance(label, str):
78
+ raise ValueError(f"Training label {label} is not a string")
79
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
80
+ return examples
81
+
82
+ def get_labels(self):
83
+ """See base class."""
84
+ return ["contradiction", "entailment", "neutral"]
85
+
86
+
87
+ xnli_processors = {
88
+ "xnli": XnliProcessor,
89
+ }
90
+
91
+ xnli_output_modes = {
92
+ "xnli": "classification",
93
+ }
94
+
95
+ xnli_tasks_num_labels = {
96
+ "xnli": 3,
97
+ }
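A short usage sketch for the XNLI processor above; `./xnli_data` is a hypothetical path that must contain the `XNLI-MT-1.0` and `XNLI-1.0` folders in the layout the processor expects.

```python
from transformers.data.processors.xnli import XnliProcessor

processor = XnliProcessor(language="de", train_language="en")
print(processor.get_labels())  # ['contradiction', 'entailment', 'neutral']

# Reads XNLI-MT-1.0/multinli/multinli.train.en.tsv and XNLI-1.0/xnli.test.tsv
# from the hypothetical ./xnli_data directory.
train_examples = processor.get_train_examples("./xnli_data")
test_examples = processor.get_test_examples("./xnli_data")
```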
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/auto_pipeline.cpython-310.pyc ADDED
Binary file (39.5 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/free_init_utils.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/onnx_utils.cpython-310.pyc ADDED
Binary file (6.98 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/__pycache__/pipeline_flax_utils.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {"pipeline_output": ["AnimateDiffPipelineOutput"]}
15
+
16
+ try:
17
+ if not (is_transformers_available() and is_torch_available()):
18
+ raise OptionalDependencyNotAvailable()
19
+ except OptionalDependencyNotAvailable:
20
+ from ...utils import dummy_torch_and_transformers_objects
21
+
22
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
23
+ else:
24
+ _import_structure["pipeline_animatediff"] = ["AnimateDiffPipeline"]
25
+ _import_structure["pipeline_animatediff_video2video"] = ["AnimateDiffVideoToVideoPipeline"]
26
+
27
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
28
+ try:
29
+ if not (is_transformers_available() and is_torch_available()):
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ from ...utils.dummy_torch_and_transformers_objects import *
33
+
34
+ else:
35
+ from .pipeline_animatediff import AnimateDiffPipeline
36
+ from .pipeline_animatediff_video2video import AnimateDiffVideoToVideoPipeline
37
+ from .pipeline_output import AnimateDiffPipelineOutput
38
+
39
+ else:
40
+ import sys
41
+
42
+ sys.modules[__name__] = _LazyModule(
43
+ __name__,
44
+ globals()["__file__"],
45
+ _import_structure,
46
+ module_spec=__spec__,
47
+ )
48
+ for name, value in _dummy_objects.items():
49
+ setattr(sys.modules[__name__], name, value)
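A small sketch of what the `_LazyModule` registration above provides (assuming `torch` and `transformers` are installed; otherwise the dummy objects are returned): importing the subpackage stays cheap, and the heavy pipeline modules are only loaded when an exported name is first accessed.

```python
import diffusers.pipelines.animatediff as animatediff  # cheap: no pipeline code imported yet

# First attribute access triggers the real import declared in _import_structure.
PipelineCls = animatediff.AnimateDiffVideoToVideoPipeline
print(PipelineCls.__name__)
```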
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_animatediff.cpython-310.pyc ADDED
Binary file (25.5 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_animatediff_video2video.cpython-310.pyc ADDED
Binary file (30.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/__pycache__/pipeline_output.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/pipeline_animatediff_video2video.py ADDED
@@ -0,0 +1,997 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
21
+
22
+ from ...image_processor import PipelineImageInput, VaeImageProcessor
23
+ from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
24
+ from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
25
+ from ...models.lora import adjust_lora_scale_text_encoder
26
+ from ...models.unets.unet_motion_model import MotionAdapter
27
+ from ...schedulers import (
28
+ DDIMScheduler,
29
+ DPMSolverMultistepScheduler,
30
+ EulerAncestralDiscreteScheduler,
31
+ EulerDiscreteScheduler,
32
+ LMSDiscreteScheduler,
33
+ PNDMScheduler,
34
+ )
35
+ from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
36
+ from ...utils.torch_utils import randn_tensor
37
+ from ..free_init_utils import FreeInitMixin
38
+ from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
39
+ from .pipeline_output import AnimateDiffPipelineOutput
40
+
41
+
42
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
43
+
44
+ EXAMPLE_DOC_STRING = """
45
+ Examples:
46
+ ```py
47
+ >>> import imageio
48
+ >>> import requests
49
+ >>> import torch
50
+ >>> from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter
51
+ >>> from diffusers.utils import export_to_gif
52
+ >>> from io import BytesIO
53
+ >>> from PIL import Image
54
+
55
+ >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
56
+ >>> pipe = AnimateDiffVideoToVideoPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter).to("cuda")
57
+ >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False, timespace_spacing="linspace")
58
+
59
+ >>> def load_video(file_path: str):
60
+ ... images = []
61
+ ...
62
+ ... if file_path.startswith(('http://', 'https://')):
63
+ ... # If the file_path is a URL
64
+ ... response = requests.get(file_path)
65
+ ... response.raise_for_status()
66
+ ... content = BytesIO(response.content)
67
+ ... vid = imageio.get_reader(content)
68
+ ... else:
69
+ ... # Assuming it's a local file path
70
+ ... vid = imageio.get_reader(file_path)
71
+ ...
72
+ ... for frame in vid:
73
+ ... pil_image = Image.fromarray(frame)
74
+ ... images.append(pil_image)
75
+ ...
76
+ ... return images
77
+
78
+ >>> video = load_video("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif")
79
+ >>> output = pipe(video=video, prompt="panda playing a guitar, on a boat, in the ocean, high quality", strength=0.5)
80
+ >>> frames = output.frames[0]
81
+ >>> export_to_gif(frames, "animation.gif")
82
+ ```
83
+ """
84
+
85
+
86
+ # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
87
+ def tensor2vid(video: torch.Tensor, processor, output_type="np"):
88
+ batch_size, channels, num_frames, height, width = video.shape
89
+ outputs = []
90
+ for batch_idx in range(batch_size):
91
+ batch_vid = video[batch_idx].permute(1, 0, 2, 3)
92
+ batch_output = processor.postprocess(batch_vid, output_type)
93
+
94
+ outputs.append(batch_output)
95
+
96
+ if output_type == "np":
97
+ outputs = np.stack(outputs)
98
+
99
+ elif output_type == "pt":
100
+ outputs = torch.stack(outputs)
101
+
102
+ elif not output_type == "pil":
103
+ raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
104
+
105
+ return outputs
106
+
107
+
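A minimal sketch of `tensor2vid` above applied to a dummy video tensor (random values, purely illustrative; the function is assumed to be imported from this module). `VaeImageProcessor` is the same processor this pipeline registers in `__init__`.

```python
import torch
from diffusers.image_processor import VaeImageProcessor

video_processor = VaeImageProcessor(vae_scale_factor=8)
# (batch, channels, num_frames, height, width), values roughly in the decoded [0, 1] range
video = torch.rand(1, 3, 16, 64, 64)

frames = tensor2vid(video, video_processor, output_type="pil")
print(len(frames), len(frames[0]))  # 1 batch entry with 16 PIL frames
```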
108
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
109
+ def retrieve_latents(
110
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
111
+ ):
112
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
113
+ return encoder_output.latent_dist.sample(generator)
114
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
115
+ return encoder_output.latent_dist.mode()
116
+ elif hasattr(encoder_output, "latents"):
117
+ return encoder_output.latents
118
+ else:
119
+ raise AttributeError("Could not access latents of provided encoder_output")
120
+
121
+
122
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
123
+ def retrieve_timesteps(
124
+ scheduler,
125
+ num_inference_steps: Optional[int] = None,
126
+ device: Optional[Union[str, torch.device]] = None,
127
+ timesteps: Optional[List[int]] = None,
128
+ **kwargs,
129
+ ):
130
+ """
131
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
132
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
133
+
134
+ Args:
135
+ scheduler (`SchedulerMixin`):
136
+ The scheduler to get timesteps from.
137
+ num_inference_steps (`int`):
138
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
139
+ `timesteps` must be `None`.
140
+ device (`str` or `torch.device`, *optional*):
141
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
142
+ timesteps (`List[int]`, *optional*):
143
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
144
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
145
+ must be `None`.
146
+
147
+ Returns:
148
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
149
+ second element is the number of inference steps.
150
+ """
151
+ if timesteps is not None:
152
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
153
+ if not accepts_timesteps:
154
+ raise ValueError(
155
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
156
+ f" timestep schedules. Please check whether you are using the correct scheduler."
157
+ )
158
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
159
+ timesteps = scheduler.timesteps
160
+ num_inference_steps = len(timesteps)
161
+ else:
162
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
163
+ timesteps = scheduler.timesteps
164
+ return timesteps, num_inference_steps
165
+
166
+
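A small sketch of `retrieve_timesteps` above with a standard `DDIMScheduler` (configuration values are illustrative; the function is assumed to be imported from this module). Note that the custom-`timesteps` branch only applies to schedulers whose `set_timesteps` accepts a `timesteps` argument, which `DDIMScheduler`'s does not.

```python
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)

# Plain path: the scheduler builds the schedule from a step count.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=8, device="cpu")
print(num_inference_steps, timesteps)
```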
167
+ class AnimateDiffVideoToVideoPipeline(
168
+ DiffusionPipeline,
169
+ StableDiffusionMixin,
170
+ TextualInversionLoaderMixin,
171
+ IPAdapterMixin,
172
+ LoraLoaderMixin,
173
+ FreeInitMixin,
174
+ ):
175
+ r"""
176
+ Pipeline for video-to-video generation.
177
+
178
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
179
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
180
+
181
+ The pipeline also inherits the following loading methods:
182
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
183
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
184
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
185
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
186
+
187
+ Args:
188
+ vae ([`AutoencoderKL`]):
189
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
190
+ text_encoder ([`CLIPTextModel`]):
191
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
192
+ tokenizer (`CLIPTokenizer`):
193
+ A [`~transformers.CLIPTokenizer`] to tokenize text.
194
+ unet ([`UNet2DConditionModel`]):
195
+ A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
196
+ motion_adapter ([`MotionAdapter`]):
197
+ A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
198
+ scheduler ([`SchedulerMixin`]):
199
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
200
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
201
+ """
202
+
203
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
204
+ _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"]
205
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
206
+
207
+ def __init__(
208
+ self,
209
+ vae: AutoencoderKL,
210
+ text_encoder: CLIPTextModel,
211
+ tokenizer: CLIPTokenizer,
212
+ unet: UNet2DConditionModel,
213
+ motion_adapter: MotionAdapter,
214
+ scheduler: Union[
215
+ DDIMScheduler,
216
+ PNDMScheduler,
217
+ LMSDiscreteScheduler,
218
+ EulerDiscreteScheduler,
219
+ EulerAncestralDiscreteScheduler,
220
+ DPMSolverMultistepScheduler,
221
+ ],
222
+ feature_extractor: CLIPImageProcessor = None,
223
+ image_encoder: CLIPVisionModelWithProjection = None,
224
+ ):
225
+ super().__init__()
226
+ if isinstance(unet, UNet2DConditionModel):
227
+ unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
228
+
229
+ self.register_modules(
230
+ vae=vae,
231
+ text_encoder=text_encoder,
232
+ tokenizer=tokenizer,
233
+ unet=unet,
234
+ motion_adapter=motion_adapter,
235
+ scheduler=scheduler,
236
+ feature_extractor=feature_extractor,
237
+ image_encoder=image_encoder,
238
+ )
239
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
240
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
241
+
242
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
243
+ def encode_prompt(
244
+ self,
245
+ prompt,
246
+ device,
247
+ num_images_per_prompt,
248
+ do_classifier_free_guidance,
249
+ negative_prompt=None,
250
+ prompt_embeds: Optional[torch.FloatTensor] = None,
251
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
252
+ lora_scale: Optional[float] = None,
253
+ clip_skip: Optional[int] = None,
254
+ ):
255
+ r"""
256
+ Encodes the prompt into text encoder hidden states.
257
+
258
+ Args:
259
+ prompt (`str` or `List[str]`, *optional*):
260
+ prompt to be encoded
261
+ device: (`torch.device`):
262
+ torch device
263
+ num_images_per_prompt (`int`):
264
+ number of images that should be generated per prompt
265
+ do_classifier_free_guidance (`bool`):
266
+ whether to use classifier free guidance or not
267
+ negative_prompt (`str` or `List[str]`, *optional*):
268
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
269
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
270
+ less than `1`).
271
+ prompt_embeds (`torch.FloatTensor`, *optional*):
272
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
273
+ provided, text embeddings will be generated from `prompt` input argument.
274
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
275
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
276
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
277
+ argument.
278
+ lora_scale (`float`, *optional*):
279
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
280
+ clip_skip (`int`, *optional*):
281
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
282
+ the output of the pre-final layer will be used for computing the prompt embeddings.
283
+ """
284
+ # set lora scale so that monkey patched LoRA
285
+ # function of text encoder can correctly access it
286
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
287
+ self._lora_scale = lora_scale
288
+
289
+ # dynamically adjust the LoRA scale
290
+ if not USE_PEFT_BACKEND:
291
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
292
+ else:
293
+ scale_lora_layers(self.text_encoder, lora_scale)
294
+
295
+ if prompt is not None and isinstance(prompt, str):
296
+ batch_size = 1
297
+ elif prompt is not None and isinstance(prompt, list):
298
+ batch_size = len(prompt)
299
+ else:
300
+ batch_size = prompt_embeds.shape[0]
301
+
302
+ if prompt_embeds is None:
303
+ # textual inversion: process multi-vector tokens if necessary
304
+ if isinstance(self, TextualInversionLoaderMixin):
305
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
306
+
307
+ text_inputs = self.tokenizer(
308
+ prompt,
309
+ padding="max_length",
310
+ max_length=self.tokenizer.model_max_length,
311
+ truncation=True,
312
+ return_tensors="pt",
313
+ )
314
+ text_input_ids = text_inputs.input_ids
315
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
316
+
317
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
318
+ text_input_ids, untruncated_ids
319
+ ):
320
+ removed_text = self.tokenizer.batch_decode(
321
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
322
+ )
323
+ logger.warning(
324
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
325
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
326
+ )
327
+
328
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
329
+ attention_mask = text_inputs.attention_mask.to(device)
330
+ else:
331
+ attention_mask = None
332
+
333
+ if clip_skip is None:
334
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
335
+ prompt_embeds = prompt_embeds[0]
336
+ else:
337
+ prompt_embeds = self.text_encoder(
338
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
339
+ )
340
+ # Access the `hidden_states` first, that contains a tuple of
341
+ # all the hidden states from the encoder layers. Then index into
342
+ # the tuple to access the hidden states from the desired layer.
343
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
344
+ # We also need to apply the final LayerNorm here to not mess with the
345
+ # representations. The `last_hidden_states` that we typically use for
346
+ # obtaining the final prompt representations passes through the LayerNorm
347
+ # layer.
348
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
349
+
350
+ if self.text_encoder is not None:
351
+ prompt_embeds_dtype = self.text_encoder.dtype
352
+ elif self.unet is not None:
353
+ prompt_embeds_dtype = self.unet.dtype
354
+ else:
355
+ prompt_embeds_dtype = prompt_embeds.dtype
356
+
357
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
358
+
359
+ bs_embed, seq_len, _ = prompt_embeds.shape
360
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
361
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
362
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
363
+
364
+ # get unconditional embeddings for classifier free guidance
365
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
366
+ uncond_tokens: List[str]
367
+ if negative_prompt is None:
368
+ uncond_tokens = [""] * batch_size
369
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
370
+ raise TypeError(
371
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
372
+ f" {type(prompt)}."
373
+ )
374
+ elif isinstance(negative_prompt, str):
375
+ uncond_tokens = [negative_prompt]
376
+ elif batch_size != len(negative_prompt):
377
+ raise ValueError(
378
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
379
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
380
+ " the batch size of `prompt`."
381
+ )
382
+ else:
383
+ uncond_tokens = negative_prompt
384
+
385
+ # textual inversion: process multi-vector tokens if necessary
386
+ if isinstance(self, TextualInversionLoaderMixin):
387
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
388
+
389
+ max_length = prompt_embeds.shape[1]
390
+ uncond_input = self.tokenizer(
391
+ uncond_tokens,
392
+ padding="max_length",
393
+ max_length=max_length,
394
+ truncation=True,
395
+ return_tensors="pt",
396
+ )
397
+
398
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
399
+ attention_mask = uncond_input.attention_mask.to(device)
400
+ else:
401
+ attention_mask = None
402
+
403
+ negative_prompt_embeds = self.text_encoder(
404
+ uncond_input.input_ids.to(device),
405
+ attention_mask=attention_mask,
406
+ )
407
+ negative_prompt_embeds = negative_prompt_embeds[0]
408
+
409
+ if do_classifier_free_guidance:
410
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
411
+ seq_len = negative_prompt_embeds.shape[1]
412
+
413
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
414
+
415
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
416
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
417
+
418
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
419
+ # Retrieve the original scale by scaling back the LoRA layers
420
+ unscale_lora_layers(self.text_encoder, lora_scale)
421
+
422
+ return prompt_embeds, negative_prompt_embeds
423
+
424
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
425
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
426
+ dtype = next(self.image_encoder.parameters()).dtype
427
+
428
+ if not isinstance(image, torch.Tensor):
429
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
430
+
431
+ image = image.to(device=device, dtype=dtype)
432
+ if output_hidden_states:
433
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
434
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
435
+ uncond_image_enc_hidden_states = self.image_encoder(
436
+ torch.zeros_like(image), output_hidden_states=True
437
+ ).hidden_states[-2]
438
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
439
+ num_images_per_prompt, dim=0
440
+ )
441
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
442
+ else:
443
+ image_embeds = self.image_encoder(image).image_embeds
444
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
445
+ uncond_image_embeds = torch.zeros_like(image_embeds)
446
+
447
+ return image_embeds, uncond_image_embeds
448
+
449
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
450
+ def prepare_ip_adapter_image_embeds(
451
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
452
+ ):
453
+ if ip_adapter_image_embeds is None:
454
+ if not isinstance(ip_adapter_image, list):
455
+ ip_adapter_image = [ip_adapter_image]
456
+
457
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
458
+ raise ValueError(
459
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
460
+ )
461
+
462
+ image_embeds = []
463
+ for single_ip_adapter_image, image_proj_layer in zip(
464
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
465
+ ):
466
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
467
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
468
+ single_ip_adapter_image, device, 1, output_hidden_state
469
+ )
470
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
471
+ single_negative_image_embeds = torch.stack(
472
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
473
+ )
474
+
475
+ if do_classifier_free_guidance:
476
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
477
+ single_image_embeds = single_image_embeds.to(device)
478
+
479
+ image_embeds.append(single_image_embeds)
480
+ else:
481
+ repeat_dims = [1]
482
+ image_embeds = []
483
+ for single_image_embeds in ip_adapter_image_embeds:
484
+ if do_classifier_free_guidance:
485
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
486
+ single_image_embeds = single_image_embeds.repeat(
487
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
488
+ )
489
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
490
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
491
+ )
492
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
493
+ else:
494
+ single_image_embeds = single_image_embeds.repeat(
495
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
496
+ )
497
+ image_embeds.append(single_image_embeds)
498
+
499
+ return image_embeds
500
+
501
+ # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
502
+ def decode_latents(self, latents):
503
+ latents = 1 / self.vae.config.scaling_factor * latents
504
+
505
+ batch_size, channels, num_frames, height, width = latents.shape
506
+ latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
507
+
508
+ image = self.vae.decode(latents).sample
509
+ video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4)
510
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
511
+ video = video.float()
512
+ return video
513
+
514
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
515
+ def prepare_extra_step_kwargs(self, generator, eta):
516
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
517
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
518
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
519
+ # and should be between [0, 1]
520
+
521
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
522
+ extra_step_kwargs = {}
523
+ if accepts_eta:
524
+ extra_step_kwargs["eta"] = eta
525
+
526
+ # check if the scheduler accepts generator
527
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
528
+ if accepts_generator:
529
+ extra_step_kwargs["generator"] = generator
530
+ return extra_step_kwargs
531
+
532
+ def check_inputs(
533
+ self,
534
+ prompt,
535
+ strength,
536
+ height,
537
+ width,
538
+ video=None,
539
+ latents=None,
540
+ negative_prompt=None,
541
+ prompt_embeds=None,
542
+ negative_prompt_embeds=None,
543
+ ip_adapter_image=None,
544
+ ip_adapter_image_embeds=None,
545
+ callback_on_step_end_tensor_inputs=None,
546
+ ):
547
+ if strength < 0 or strength > 1:
548
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
549
+
550
+ if height % 8 != 0 or width % 8 != 0:
551
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
552
+
553
+ if callback_on_step_end_tensor_inputs is not None and not all(
554
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
555
+ ):
556
+ raise ValueError(
557
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
558
+ )
559
+
560
+ if prompt is not None and prompt_embeds is not None:
561
+ raise ValueError(
562
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
563
+ " only forward one of the two."
564
+ )
565
+ elif prompt is None and prompt_embeds is None:
566
+ raise ValueError(
567
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
568
+ )
569
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
570
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
571
+
572
+ if negative_prompt is not None and negative_prompt_embeds is not None:
573
+ raise ValueError(
574
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
575
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
576
+ )
577
+
578
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
579
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
580
+ raise ValueError(
581
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
582
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
583
+ f" {negative_prompt_embeds.shape}."
584
+ )
585
+
586
+ if video is not None and latents is not None:
587
+ raise ValueError("Only one of `video` or `latents` should be provided")
588
+
589
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
590
+ raise ValueError(
591
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
592
+ )
593
+
594
+ if ip_adapter_image_embeds is not None:
595
+ if not isinstance(ip_adapter_image_embeds, list):
596
+ raise ValueError(
597
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
598
+ )
599
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
600
+ raise ValueError(
601
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
602
+ )
603
+
604
+ def get_timesteps(self, num_inference_steps, timesteps, strength, device):
605
+ # get the original timestep using init_timestep
606
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
607
+
608
+ t_start = max(num_inference_steps - init_timestep, 0)
609
+ timesteps = timesteps[t_start * self.scheduler.order :]
610
+
611
+ return timesteps, num_inference_steps - t_start
612
+
613
+ def prepare_latents(
614
+ self,
615
+ video,
616
+ height,
617
+ width,
618
+ num_channels_latents,
619
+ batch_size,
620
+ timestep,
621
+ dtype,
622
+ device,
623
+ generator,
624
+ latents=None,
625
+ ):
626
+ # video must be a list of list of images
627
+ # the outer list denotes having multiple videos as input, whereas the inner list contains the frames of the video
628
+ # as a list of images
629
+ if not isinstance(video[0], list):
630
+ video = [video]
631
+ if latents is None:
632
+ video = torch.cat(
633
+ [self.image_processor.preprocess(vid, height=height, width=width).unsqueeze(0) for vid in video], dim=0
634
+ )
635
+ video = video.to(device=device, dtype=dtype)
636
+ num_frames = video.shape[1]
637
+ else:
638
+ num_frames = latents.shape[2]
639
+
640
+ shape = (
641
+ batch_size,
642
+ num_channels_latents,
643
+ num_frames,
644
+ height // self.vae_scale_factor,
645
+ width // self.vae_scale_factor,
646
+ )
647
+
648
+ if isinstance(generator, list) and len(generator) != batch_size:
649
+ raise ValueError(
650
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
651
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
652
+ )
653
+
654
+ if latents is None:
655
+ # make sure the VAE is in float32 mode, as it overflows in float16
656
+ if self.vae.config.force_upcast:
657
+ video = video.float()
658
+ self.vae.to(dtype=torch.float32)
659
+
660
+ if isinstance(generator, list):
661
+ if len(generator) != batch_size:
662
+ raise ValueError(
663
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
664
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
665
+ )
666
+
667
+ init_latents = [
668
+ retrieve_latents(self.vae.encode(video[i]), generator=generator[i]).unsqueeze(0)
669
+ for i in range(batch_size)
670
+ ]
671
+ else:
672
+ init_latents = [
673
+ retrieve_latents(self.vae.encode(vid), generator=generator).unsqueeze(0) for vid in video
674
+ ]
675
+
676
+ init_latents = torch.cat(init_latents, dim=0)
677
+
678
+ # restore vae to original dtype
679
+ if self.vae.config.force_upcast:
680
+ self.vae.to(dtype)
681
+
682
+ init_latents = init_latents.to(dtype)
683
+ init_latents = self.vae.config.scaling_factor * init_latents
684
+
685
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
686
+ # expand init_latents for batch_size
687
+ error_message = (
688
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
689
+ " images (`image`). Please make sure to update your script to pass as many initial images as text prompts"
690
+ )
691
+ raise ValueError(error_message)
692
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
693
+ raise ValueError(
694
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
695
+ )
696
+ else:
697
+ init_latents = torch.cat([init_latents], dim=0)
698
+
699
+ noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
700
+ latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4)
701
+ else:
702
+ if shape != latents.shape:
703
+ # [B, C, F, H, W]
704
+ raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
705
+ latents = latents.to(device, dtype=dtype)
706
+
707
+ return latents
708
+
709
+ @property
710
+ def guidance_scale(self):
711
+ return self._guidance_scale
712
+
713
+ @property
714
+ def clip_skip(self):
715
+ return self._clip_skip
716
+
717
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
718
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
719
+ # corresponds to doing no classifier free guidance.
720
+ @property
721
+ def do_classifier_free_guidance(self):
722
+ return self._guidance_scale > 1
723
+
724
+ @property
725
+ def cross_attention_kwargs(self):
726
+ return self._cross_attention_kwargs
727
+
728
+ @property
729
+ def num_timesteps(self):
730
+ return self._num_timesteps
731
+
732
+ @torch.no_grad()
733
+ def __call__(
734
+ self,
735
+ video: List[List[PipelineImageInput]] = None,
736
+ prompt: Optional[Union[str, List[str]]] = None,
737
+ height: Optional[int] = None,
738
+ width: Optional[int] = None,
739
+ num_inference_steps: int = 50,
740
+ timesteps: Optional[List[int]] = None,
741
+ guidance_scale: float = 7.5,
742
+ strength: float = 0.8,
743
+ negative_prompt: Optional[Union[str, List[str]]] = None,
744
+ num_videos_per_prompt: Optional[int] = 1,
745
+ eta: float = 0.0,
746
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
747
+ latents: Optional[torch.FloatTensor] = None,
748
+ prompt_embeds: Optional[torch.FloatTensor] = None,
749
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
750
+ ip_adapter_image: Optional[PipelineImageInput] = None,
751
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
752
+ output_type: Optional[str] = "pil",
753
+ return_dict: bool = True,
754
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
755
+ clip_skip: Optional[int] = None,
756
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
757
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
758
+ ):
759
+ r"""
760
+ The call function to the pipeline for generation.
761
+
762
+ Args:
763
+ video (`List[PipelineImageInput]`):
764
+ The input video to condition the generation on. Must be a list of images/frames of the video.
765
+ prompt (`str` or `List[str]`, *optional*):
766
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
767
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
768
+ The height in pixels of the generated video.
769
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
770
+ The width in pixels of the generated video.
771
+ num_inference_steps (`int`, *optional*, defaults to 50):
772
+ The number of denoising steps. More denoising steps usually lead to higher quality videos at the
773
+ expense of slower inference.
774
+ strength (`float`, *optional*, defaults to 0.8):
775
+ Higher strength leads to more differences between original video and generated video.
776
+ guidance_scale (`float`, *optional*, defaults to 7.5):
777
+ A higher guidance scale value encourages the model to generate images closely linked to the text
778
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
779
+ negative_prompt (`str` or `List[str]`, *optional*):
780
+ The prompt or prompts to guide what not to include in image generation. If not defined, you need to
781
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
782
+ eta (`float`, *optional*, defaults to 0.0):
783
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
784
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
785
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
786
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
787
+ generation deterministic.
788
+ latents (`torch.FloatTensor`, *optional*):
789
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
790
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
791
+ tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
792
+ `(batch_size, num_channel, num_frames, height, width)`.
793
+ prompt_embeds (`torch.FloatTensor`, *optional*):
794
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
795
+ provided, text embeddings are generated from the `prompt` input argument.
796
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
797
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
798
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
799
+ ip_adapter_image (`PipelineImageInput`, *optional*):
800
+ Optional image input to work with IP Adapters.
801
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
802
+ Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of IP-Adapters.
803
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
804
+ if `do_classifier_free_guidance` is set to `True`.
805
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
806
+ output_type (`str`, *optional*, defaults to `"pil"`):
807
+ The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
808
+ `np.array`.
809
+ return_dict (`bool`, *optional*, defaults to `True`):
810
+ Whether or not to return an [`AnimateDiffPipelineOutput`] instead
811
+ of a plain tuple.
812
+ cross_attention_kwargs (`dict`, *optional*):
813
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
814
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
815
+ clip_skip (`int`, *optional*):
816
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
817
+ the output of the pre-final layer will be used for computing the prompt embeddings.
818
+ callback_on_step_end (`Callable`, *optional*):
819
+ A function that is called at the end of each denoising step during inference. The function is called
820
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
821
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
822
+ `callback_on_step_end_tensor_inputs`.
823
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
824
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
825
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
826
+ `._callback_tensor_inputs` attribute of your pipeline class.
827
+
828
+ Examples:
829
+
830
+ Returns:
831
+ [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
832
+ If `return_dict` is `True`, [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
833
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
834
+ """
835
+
836
+ # 0. Default height and width to unet
837
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
838
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
839
+
840
+ num_videos_per_prompt = 1
841
+
842
+ # 1. Check inputs. Raise error if not correct
843
+ self.check_inputs(
844
+ prompt=prompt,
845
+ strength=strength,
846
+ height=height,
847
+ width=width,
848
+ negative_prompt=negative_prompt,
849
+ prompt_embeds=prompt_embeds,
850
+ negative_prompt_embeds=negative_prompt_embeds,
851
+ video=video,
852
+ latents=latents,
853
+ ip_adapter_image=ip_adapter_image,
854
+ ip_adapter_image_embeds=ip_adapter_image_embeds,
855
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
856
+ )
857
+
858
+ self._guidance_scale = guidance_scale
859
+ self._clip_skip = clip_skip
860
+ self._cross_attention_kwargs = cross_attention_kwargs
861
+
862
+ # 2. Define call parameters
863
+ if prompt is not None and isinstance(prompt, str):
864
+ batch_size = 1
865
+ elif prompt is not None and isinstance(prompt, list):
866
+ batch_size = len(prompt)
867
+ else:
868
+ batch_size = prompt_embeds.shape[0]
869
+
870
+ device = self._execution_device
871
+
872
+ # 3. Encode input prompt
873
+ text_encoder_lora_scale = (
874
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
875
+ )
876
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
877
+ prompt,
878
+ device,
879
+ num_videos_per_prompt,
880
+ self.do_classifier_free_guidance,
881
+ negative_prompt,
882
+ prompt_embeds=prompt_embeds,
883
+ negative_prompt_embeds=negative_prompt_embeds,
884
+ lora_scale=text_encoder_lora_scale,
885
+ clip_skip=self.clip_skip,
886
+ )
887
+
888
+ # For classifier free guidance, we need to do two forward passes.
889
+ # Here we concatenate the unconditional and text embeddings into a single batch
890
+ # to avoid doing two forward passes
891
+ if self.do_classifier_free_guidance:
892
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
893
+
894
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
895
+ image_embeds = self.prepare_ip_adapter_image_embeds(
896
+ ip_adapter_image,
897
+ ip_adapter_image_embeds,
898
+ device,
899
+ batch_size * num_videos_per_prompt,
900
+ self.do_classifier_free_guidance,
901
+ )
902
+
903
+ # 4. Prepare timesteps
904
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
905
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
906
+ latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
907
+
908
+ # 5. Prepare latent variables
909
+ num_channels_latents = self.unet.config.in_channels
910
+ latents = self.prepare_latents(
911
+ video=video,
912
+ height=height,
913
+ width=width,
914
+ num_channels_latents=num_channels_latents,
915
+ batch_size=batch_size * num_videos_per_prompt,
916
+ timestep=latent_timestep,
917
+ dtype=prompt_embeds.dtype,
918
+ device=device,
919
+ generator=generator,
920
+ latents=latents,
921
+ )
922
+
923
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
924
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
925
+
926
+ # 7. Add image embeds for IP-Adapter
927
+ added_cond_kwargs = (
928
+ {"image_embeds": image_embeds}
929
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
930
+ else None
931
+ )
932
+
933
+ num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
934
+ for free_init_iter in range(num_free_init_iters):
935
+ if self.free_init_enabled:
936
+ latents, timesteps = self._apply_free_init(
937
+ latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
938
+ )
939
+ num_inference_steps = len(timesteps)
940
+ # make sure to readjust timesteps based on strength
941
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device)
942
+
943
+ self._num_timesteps = len(timesteps)
944
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
945
+
946
+ # 8. Denoising loop
947
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
948
+ for i, t in enumerate(timesteps):
949
+ # expand the latents if we are doing classifier free guidance
950
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
951
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
952
+
953
+ # predict the noise residual
954
+ noise_pred = self.unet(
955
+ latent_model_input,
956
+ t,
957
+ encoder_hidden_states=prompt_embeds,
958
+ cross_attention_kwargs=self.cross_attention_kwargs,
959
+ added_cond_kwargs=added_cond_kwargs,
960
+ ).sample
961
+
962
+ # perform guidance
963
+ if self.do_classifier_free_guidance:
964
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
965
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
966
+
967
+ # compute the previous noisy sample x_t -> x_t-1
968
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
969
+
970
+ if callback_on_step_end is not None:
971
+ callback_kwargs = {}
972
+ for k in callback_on_step_end_tensor_inputs:
973
+ callback_kwargs[k] = locals()[k]
974
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
975
+
976
+ latents = callback_outputs.pop("latents", latents)
977
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
978
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
979
+
980
+ # call the callback, if provided
981
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
982
+ progress_bar.update()
983
+
984
+ # 9. Post-processing
985
+ if output_type == "latent":
986
+ video = latents
987
+ else:
988
+ video_tensor = self.decode_latents(latents)
989
+ video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
990
+
991
+ # 10. Offload all models
992
+ self.maybe_free_model_hooks()
993
+
994
+ if not return_dict:
995
+ return (video,)
996
+
997
+ return AnimateDiffPipelineOutput(frames=video)
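The `__call__` above follows the usual diffusers img2img pattern: latents are built from the encoded input frames, the timestep schedule is truncated according to `strength`, and classifier-free guidance plus optional IP-Adapter embeddings drive the denoising loop. A minimal usage sketch — assuming the class in this file is exported as `AnimateDiffVideoToVideoPipeline`, and with illustrative checkpoint ids and frame paths that are not taken from this diff:

```py
import torch
from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif, load_image

# Illustrative checkpoints: any SD 1.5 base model with a compatible motion adapter should work.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config, beta_schedule="linear", clip_sample=False, timestep_spacing="linspace"
)

# `video` is a list of PIL frames; `strength` controls how far the output drifts from the input clip.
video = [load_image(f"frame_{i:03d}.png") for i in range(16)]  # hypothetical frame files
result = pipe(
    video=video,
    prompt="a panda surfing on a wave, high quality",
    strength=0.6,
    guidance_scale=7.5,
    num_inference_steps=25,
)
export_to_gif(result.frames[0], "animation.gif")
```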
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/animatediff/pipeline_output.py ADDED
@@ -0,0 +1,23 @@
1
+ from dataclasses import dataclass
2
+ from typing import List, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+
8
+ from ...utils import BaseOutput
9
+
10
+
11
+ @dataclass
12
+ class AnimateDiffPipelineOutput(BaseOutput):
13
+ r"""
14
+ Output class for AnimateDiff pipelines.
15
+
16
+ Args:
17
+ frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
18
+ List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing denoised
19
+ PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
20
+ `(batch_size, num_frames, channels, height, width)`
21
+ """
22
+
23
+ frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]]
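Like every `BaseOutput` subclass, this dataclass can be read by attribute, by key, or converted to a tuple, which is what the pipelines rely on when `return_dict=False`. A small sketch (shapes are illustrative):

```py
import numpy as np
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput

frames = np.zeros((1, 16, 3, 64, 64), dtype=np.float32)  # (batch, frames, channels, height, width)
out = AnimateDiffPipelineOutput(frames=frames)

# Attribute, key and tuple access all point at the same underlying field.
assert out.frames is out["frames"]
assert out.to_tuple()[0] is frames
```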
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/controlnet/__init__.py ADDED
@@ -0,0 +1,80 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_flax_available,
9
+ is_torch_available,
10
+ is_transformers_available,
11
+ )
12
+
13
+
14
+ _dummy_objects = {}
15
+ _import_structure = {}
16
+
17
+ try:
18
+ if not (is_transformers_available() and is_torch_available()):
19
+ raise OptionalDependencyNotAvailable()
20
+ except OptionalDependencyNotAvailable:
21
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
22
+
23
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
24
+ else:
25
+ _import_structure["multicontrolnet"] = ["MultiControlNetModel"]
26
+ _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"]
27
+ _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"]
28
+ _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"]
29
+ _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"]
30
+ _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"]
31
+ _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"]
32
+ _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"]
33
+ try:
34
+ if not (is_transformers_available() and is_flax_available()):
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ from ...utils import dummy_flax_and_transformers_objects # noqa F403
38
+
39
+ _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects))
40
+ else:
41
+ _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"]
42
+
43
+
44
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
45
+ try:
46
+ if not (is_transformers_available() and is_torch_available()):
47
+ raise OptionalDependencyNotAvailable()
48
+
49
+ except OptionalDependencyNotAvailable:
50
+ from ...utils.dummy_torch_and_transformers_objects import *
51
+ else:
52
+ from .multicontrolnet import MultiControlNetModel
53
+ from .pipeline_controlnet import StableDiffusionControlNetPipeline
54
+ from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline
55
+ from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
56
+ from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
57
+ from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline
58
+ from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
59
+ from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline
60
+
61
+ try:
62
+ if not (is_transformers_available() and is_flax_available()):
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ from ...utils.dummy_flax_and_transformers_objects import * # noqa F403
66
+ else:
67
+ from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
68
+
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(
74
+ __name__,
75
+ globals()["__file__"],
76
+ _import_structure,
77
+ module_spec=__spec__,
78
+ )
79
+ for name, value in _dummy_objects.items():
80
+ setattr(sys.modules[__name__], name, value)
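The `_LazyModule` registration above keeps importing `diffusers.pipelines.controlnet` cheap: submodules listed in `_import_structure` are only loaded when one of their attributes is first touched, and the dummy objects fill in when torch/transformers (or flax) are missing. A quick sketch of how that is consumed, assuming a torch + transformers install:

```py
# The subpackage import itself does not pull in the heavy pipeline modules.
from diffusers.pipelines import controlnet

# First attribute access triggers the lazy import of `pipeline_controlnet`.
pipe_cls = controlnet.StableDiffusionControlNetPipeline
print(pipe_cls.__module__)  # diffusers.pipelines.controlnet.pipeline_controlnet
```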
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py ADDED
@@ -0,0 +1,532 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import warnings
16
+ from functools import partial
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import jax
20
+ import jax.numpy as jnp
21
+ import numpy as np
22
+ from flax.core.frozen_dict import FrozenDict
23
+ from flax.jax_utils import unreplicate
24
+ from flax.training.common_utils import shard
25
+ from PIL import Image
26
+ from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel
27
+
28
+ from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel
29
+ from ...schedulers import (
30
+ FlaxDDIMScheduler,
31
+ FlaxDPMSolverMultistepScheduler,
32
+ FlaxLMSDiscreteScheduler,
33
+ FlaxPNDMScheduler,
34
+ )
35
+ from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring
36
+ from ..pipeline_flax_utils import FlaxDiffusionPipeline
37
+ from ..stable_diffusion import FlaxStableDiffusionPipelineOutput
38
+ from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+ # Set to True to use python for loop instead of jax.fori_loop for easier debugging
44
+ DEBUG = False
45
+
46
+ EXAMPLE_DOC_STRING = """
47
+ Examples:
48
+ ```py
49
+ >>> import jax
50
+ >>> import numpy as np
51
+ >>> import jax.numpy as jnp
52
+ >>> from flax.jax_utils import replicate
53
+ >>> from flax.training.common_utils import shard
54
+ >>> from diffusers.utils import load_image, make_image_grid
55
+ >>> from PIL import Image
56
+ >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel
57
+
58
+
59
+ >>> def create_key(seed=0):
60
+ ... return jax.random.PRNGKey(seed)
61
+
62
+
63
+ >>> rng = create_key(0)
64
+
65
+ >>> # get canny image
66
+ >>> canny_image = load_image(
67
+ ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg"
68
+ ... )
69
+
70
+ >>> prompts = "best quality, extremely detailed"
71
+ >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality"
72
+
73
+ >>> # load control net and stable diffusion v1-5
74
+ >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
75
+ ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
76
+ ... )
77
+ >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
78
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
79
+ ... )
80
+ >>> params["controlnet"] = controlnet_params
81
+
82
+ >>> num_samples = jax.device_count()
83
+ >>> rng = jax.random.split(rng, jax.device_count())
84
+
85
+ >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
86
+ >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
87
+ >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
88
+
89
+ >>> p_params = replicate(params)
90
+ >>> prompt_ids = shard(prompt_ids)
91
+ >>> negative_prompt_ids = shard(negative_prompt_ids)
92
+ >>> processed_image = shard(processed_image)
93
+
94
+ >>> output = pipe(
95
+ ... prompt_ids=prompt_ids,
96
+ ... image=processed_image,
97
+ ... params=p_params,
98
+ ... prng_seed=rng,
99
+ ... num_inference_steps=50,
100
+ ... neg_prompt_ids=negative_prompt_ids,
101
+ ... jit=True,
102
+ ... ).images
103
+
104
+ >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
105
+ >>> output_images = make_image_grid(output_images, num_samples // 4, 4)
106
+ >>> output_images.save("generated_image.png")
107
+ ```
108
+ """
109
+
110
+
111
+ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline):
112
+ r"""
113
+ Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance.
114
+
115
+ This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
116
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
117
+
118
+ Args:
119
+ vae ([`FlaxAutoencoderKL`]):
120
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
121
+ text_encoder ([`~transformers.FlaxCLIPTextModel`]):
122
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
123
+ tokenizer ([`~transformers.CLIPTokenizer`]):
124
+ A `CLIPTokenizer` to tokenize text.
125
+ unet ([`FlaxUNet2DConditionModel`]):
126
+ A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
127
+ controlnet ([`FlaxControlNetModel`]):
128
+ Provides additional conditioning to the `unet` during the denoising process.
129
+ scheduler ([`SchedulerMixin`]):
130
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
131
+ [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
132
+ [`FlaxDPMSolverMultistepScheduler`].
133
+ safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
134
+ Classification module that estimates whether generated images could be considered offensive or harmful.
135
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
136
+ about a model's potential harms.
137
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
138
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
139
+ """
140
+
141
+ def __init__(
142
+ self,
143
+ vae: FlaxAutoencoderKL,
144
+ text_encoder: FlaxCLIPTextModel,
145
+ tokenizer: CLIPTokenizer,
146
+ unet: FlaxUNet2DConditionModel,
147
+ controlnet: FlaxControlNetModel,
148
+ scheduler: Union[
149
+ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
150
+ ],
151
+ safety_checker: FlaxStableDiffusionSafetyChecker,
152
+ feature_extractor: CLIPFeatureExtractor,
153
+ dtype: jnp.dtype = jnp.float32,
154
+ ):
155
+ super().__init__()
156
+ self.dtype = dtype
157
+
158
+ if safety_checker is None:
159
+ logger.warning(
160
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
162
+ " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
163
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
164
+ " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
164
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
165
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
166
+ )
167
+
168
+ self.register_modules(
169
+ vae=vae,
170
+ text_encoder=text_encoder,
171
+ tokenizer=tokenizer,
172
+ unet=unet,
173
+ controlnet=controlnet,
174
+ scheduler=scheduler,
175
+ safety_checker=safety_checker,
176
+ feature_extractor=feature_extractor,
177
+ )
178
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
179
+
180
+ def prepare_text_inputs(self, prompt: Union[str, List[str]]):
181
+ if not isinstance(prompt, (str, list)):
182
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
183
+
184
+ text_input = self.tokenizer(
185
+ prompt,
186
+ padding="max_length",
187
+ max_length=self.tokenizer.model_max_length,
188
+ truncation=True,
189
+ return_tensors="np",
190
+ )
191
+
192
+ return text_input.input_ids
193
+
194
+ def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]):
195
+ if not isinstance(image, (Image.Image, list)):
196
+ raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
197
+
198
+ if isinstance(image, Image.Image):
199
+ image = [image]
200
+
201
+ processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])
202
+
203
+ return processed_images
204
+
205
+ def _get_has_nsfw_concepts(self, features, params):
206
+ has_nsfw_concepts = self.safety_checker(features, params)
207
+ return has_nsfw_concepts
208
+
209
+ def _run_safety_checker(self, images, safety_model_params, jit=False):
210
+ # safety_model_params should already be replicated when jit is True
211
+ pil_images = [Image.fromarray(image) for image in images]
212
+ features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
213
+
214
+ if jit:
215
+ features = shard(features)
216
+ has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
217
+ has_nsfw_concepts = unshard(has_nsfw_concepts)
218
+ safety_model_params = unreplicate(safety_model_params)
219
+ else:
220
+ has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
221
+
222
+ images_was_copied = False
223
+ for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
224
+ if has_nsfw_concept:
225
+ if not images_was_copied:
226
+ images_was_copied = True
227
+ images = images.copy()
228
+
229
+ images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image
230
+
231
+ if any(has_nsfw_concepts):
232
+ warnings.warn(
233
+ "Potential NSFW content was detected in one or more images. A black image will be returned"
234
+ " instead. Try again with a different prompt and/or seed."
235
+ )
236
+
237
+ return images, has_nsfw_concepts
238
+
239
+ def _generate(
240
+ self,
241
+ prompt_ids: jnp.ndarray,
242
+ image: jnp.ndarray,
243
+ params: Union[Dict, FrozenDict],
244
+ prng_seed: jax.Array,
245
+ num_inference_steps: int,
246
+ guidance_scale: float,
247
+ latents: Optional[jnp.ndarray] = None,
248
+ neg_prompt_ids: Optional[jnp.ndarray] = None,
249
+ controlnet_conditioning_scale: float = 1.0,
250
+ ):
251
+ height, width = image.shape[-2:]
252
+ if height % 64 != 0 or width % 64 != 0:
253
+ raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.")
254
+
255
+ # get prompt text embeddings
256
+ prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
257
+
258
+ # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
259
+ # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
260
+ batch_size = prompt_ids.shape[0]
261
+
262
+ max_length = prompt_ids.shape[-1]
263
+
264
+ if neg_prompt_ids is None:
265
+ uncond_input = self.tokenizer(
266
+ [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
267
+ ).input_ids
268
+ else:
269
+ uncond_input = neg_prompt_ids
270
+ negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
271
+ context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
272
+
273
+ image = jnp.concatenate([image] * 2)
274
+
275
+ latents_shape = (
276
+ batch_size,
277
+ self.unet.config.in_channels,
278
+ height // self.vae_scale_factor,
279
+ width // self.vae_scale_factor,
280
+ )
281
+ if latents is None:
282
+ latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
283
+ else:
284
+ if latents.shape != latents_shape:
285
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
286
+
287
+ def loop_body(step, args):
288
+ latents, scheduler_state = args
289
+ # For classifier free guidance, we need to do two forward passes.
290
+ # Here we concatenate the unconditional and text embeddings into a single batch
291
+ # to avoid doing two forward passes
292
+ latents_input = jnp.concatenate([latents] * 2)
293
+
294
+ t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
295
+ timestep = jnp.broadcast_to(t, latents_input.shape[0])
296
+
297
+ latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)
298
+
299
+ down_block_res_samples, mid_block_res_sample = self.controlnet.apply(
300
+ {"params": params["controlnet"]},
301
+ jnp.array(latents_input),
302
+ jnp.array(timestep, dtype=jnp.int32),
303
+ encoder_hidden_states=context,
304
+ controlnet_cond=image,
305
+ conditioning_scale=controlnet_conditioning_scale,
306
+ return_dict=False,
307
+ )
308
+
309
+ # predict the noise residual
310
+ noise_pred = self.unet.apply(
311
+ {"params": params["unet"]},
312
+ jnp.array(latents_input),
313
+ jnp.array(timestep, dtype=jnp.int32),
314
+ encoder_hidden_states=context,
315
+ down_block_additional_residuals=down_block_res_samples,
316
+ mid_block_additional_residual=mid_block_res_sample,
317
+ ).sample
318
+
319
+ # perform guidance
320
+ noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
321
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
322
+
323
+ # compute the previous noisy sample x_t -> x_t-1
324
+ latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
325
+ return latents, scheduler_state
326
+
327
+ scheduler_state = self.scheduler.set_timesteps(
328
+ params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape
329
+ )
330
+
331
+ # scale the initial noise by the standard deviation required by the scheduler
332
+ latents = latents * params["scheduler"].init_noise_sigma
333
+
334
+ if DEBUG:
335
+ # run with python for loop
336
+ for i in range(num_inference_steps):
337
+ latents, scheduler_state = loop_body(i, (latents, scheduler_state))
338
+ else:
339
+ latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state))
340
+
341
+ # scale and decode the image latents with vae
342
+ latents = 1 / self.vae.config.scaling_factor * latents
343
+ image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
344
+
345
+ image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
346
+ return image
347
+
348
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
349
+ def __call__(
350
+ self,
351
+ prompt_ids: jnp.ndarray,
352
+ image: jnp.ndarray,
353
+ params: Union[Dict, FrozenDict],
354
+ prng_seed: jax.Array,
355
+ num_inference_steps: int = 50,
356
+ guidance_scale: Union[float, jnp.ndarray] = 7.5,
357
+ latents: jnp.ndarray = None,
358
+ neg_prompt_ids: jnp.ndarray = None,
359
+ controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0,
360
+ return_dict: bool = True,
361
+ jit: bool = False,
362
+ ):
363
+ r"""
364
+ The call function to the pipeline for generation.
365
+
366
+ Args:
367
+ prompt_ids (`jnp.ndarray`):
368
+ The prompt or prompts to guide the image generation.
369
+ image (`jnp.ndarray`):
370
+ Array representing the ControlNet input condition to provide guidance to the `unet` for generation.
371
+ params (`Dict` or `FrozenDict`):
372
+ Dictionary containing the model parameters/weights.
373
+ prng_seed (`jax.Array`):
374
+ Array containing random number generator key.
375
+ num_inference_steps (`int`, *optional*, defaults to 50):
376
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
377
+ expense of slower inference.
378
+ guidance_scale (`float`, *optional*, defaults to 7.5):
379
+ A higher guidance scale value encourages the model to generate images closely linked to the text
380
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
381
+ latents (`jnp.ndarray`, *optional*):
382
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
383
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
384
+ array is generated by sampling using the supplied random `generator`.
385
+ controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0):
386
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
387
+ to the residual in the original `unet`.
388
+ return_dict (`bool`, *optional*, defaults to `True`):
389
+ Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
390
+ a plain tuple.
391
+ jit (`bool`, defaults to `False`):
392
+ Whether to run `pmap` versions of the generation and safety scoring functions.
393
+
394
+ <Tip warning={true}>
395
+
396
+ This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
397
+ future release.
398
+
399
+ </Tip>
400
+
401
+ Examples:
402
+
403
+ Returns:
404
+ [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
405
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
406
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated images
407
+ and the second element is a list of `bool`s indicating whether the corresponding generated image
408
+ contains "not-safe-for-work" (nsfw) content.
409
+ """
410
+
411
+ height, width = image.shape[-2:]
412
+
413
+ if isinstance(guidance_scale, float):
414
+ # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
415
+ # shape information, as they may be sharded (when `jit` is `True`), or not.
416
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
417
+ if len(prompt_ids.shape) > 2:
418
+ # Assume sharded
419
+ guidance_scale = guidance_scale[:, None]
420
+
421
+ if isinstance(controlnet_conditioning_scale, float):
422
+ # Convert to a tensor so each device gets a copy. Follow the prompt_ids for
423
+ # shape information, as they may be sharded (when `jit` is `True`), or not.
424
+ controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0])
425
+ if len(prompt_ids.shape) > 2:
426
+ # Assume sharded
427
+ controlnet_conditioning_scale = controlnet_conditioning_scale[:, None]
428
+
429
+ if jit:
430
+ images = _p_generate(
431
+ self,
432
+ prompt_ids,
433
+ image,
434
+ params,
435
+ prng_seed,
436
+ num_inference_steps,
437
+ guidance_scale,
438
+ latents,
439
+ neg_prompt_ids,
440
+ controlnet_conditioning_scale,
441
+ )
442
+ else:
443
+ images = self._generate(
444
+ prompt_ids,
445
+ image,
446
+ params,
447
+ prng_seed,
448
+ num_inference_steps,
449
+ guidance_scale,
450
+ latents,
451
+ neg_prompt_ids,
452
+ controlnet_conditioning_scale,
453
+ )
454
+
455
+ if self.safety_checker is not None:
456
+ safety_params = params["safety_checker"]
457
+ images_uint8_casted = (images * 255).round().astype("uint8")
458
+ num_devices, batch_size = images.shape[:2]
459
+
460
+ images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
461
+ images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
462
+ images = np.array(images)
463
+
464
+ # block images
465
+ if any(has_nsfw_concept):
466
+ for i, is_nsfw in enumerate(has_nsfw_concept):
467
+ if is_nsfw:
468
+ images[i] = np.asarray(images_uint8_casted[i])
469
+
470
+ images = images.reshape(num_devices, batch_size, height, width, 3)
471
+ else:
472
+ images = np.asarray(images)
473
+ has_nsfw_concept = False
474
+
475
+ if not return_dict:
476
+ return (images, has_nsfw_concept)
477
+
478
+ return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
479
+
480
+
481
+ # Static argnums are pipe, num_inference_steps. A change would trigger recompilation.
482
+ # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
483
+ @partial(
484
+ jax.pmap,
485
+ in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0),
486
+ static_broadcasted_argnums=(0, 5),
487
+ )
488
+ def _p_generate(
489
+ pipe,
490
+ prompt_ids,
491
+ image,
492
+ params,
493
+ prng_seed,
494
+ num_inference_steps,
495
+ guidance_scale,
496
+ latents,
497
+ neg_prompt_ids,
498
+ controlnet_conditioning_scale,
499
+ ):
500
+ return pipe._generate(
501
+ prompt_ids,
502
+ image,
503
+ params,
504
+ prng_seed,
505
+ num_inference_steps,
506
+ guidance_scale,
507
+ latents,
508
+ neg_prompt_ids,
509
+ controlnet_conditioning_scale,
510
+ )
511
+
512
+
513
+ @partial(jax.pmap, static_broadcasted_argnums=(0,))
514
+ def _p_get_has_nsfw_concepts(pipe, features, params):
515
+ return pipe._get_has_nsfw_concepts(features, params)
516
+
517
+
518
+ def unshard(x: jnp.ndarray):
519
+ # einops.rearrange(x, 'd b ... -> (d b) ...')
520
+ num_devices, batch_size = x.shape[:2]
521
+ rest = x.shape[2:]
522
+ return x.reshape(num_devices * batch_size, *rest)
523
+
524
+
525
+ def preprocess(image, dtype):
526
+ image = image.convert("RGB")
527
+ w, h = image.size
528
+ w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64
529
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
530
+ image = jnp.array(image).astype(dtype) / 255.0
531
+ image = image[None].transpose(0, 3, 1, 2)
532
+ return image
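Two details of the helpers above are easy to miss: `preprocess` snaps the conditioning image down to the nearest multiple of 64 before converting it to an NCHW array in [0, 1], and `unshard` collapses the per-device leading axis that `shard` adds when `jit=True`. A small shape sketch (sizes are illustrative):

```py
import jax.numpy as jnp
import numpy as np
from PIL import Image

# preprocess-style resize: a 517x390 image is snapped to 512x384, the nearest multiples of 64.
img = Image.fromarray(np.zeros((390, 517, 3), dtype=np.uint8))
w, h = (x - x % 64 for x in img.size)
print(w, h)  # 512 384

# unshard: (num_devices, per_device_batch, ...) -> (num_devices * per_device_batch, ...)
x = jnp.zeros((2, 4, 64, 64, 3))
flat = x.reshape(x.shape[0] * x.shape[1], *x.shape[2:])
print(flat.shape)  # (8, 64, 64, 3)
```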
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.06 kB).
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/pipeline_if_inpainting_superresolution.cpython-310.pyc ADDED
Binary file (31.7 kB).
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/deepfloyd_if/__pycache__/safety_checker.cpython-310.pyc ADDED
Binary file (1.97 kB).
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/dit/__pycache__/pipeline_dit.cpython-310.pyc ADDED
Binary file (7.38 kB).
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/dit/pipeline_dit.py ADDED
@@ -0,0 +1,233 @@
1
+ # Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
2
+ # William Peebles and Saining Xie
3
+ #
4
+ # Copyright (c) 2021 OpenAI
5
+ # MIT License
6
+ #
7
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ import torch
24
+
25
+ from ...models import AutoencoderKL, Transformer2DModel
26
+ from ...schedulers import KarrasDiffusionSchedulers
27
+ from ...utils.torch_utils import randn_tensor
28
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
29
+
30
+
31
+ class DiTPipeline(DiffusionPipeline):
32
+ r"""
33
+ Pipeline for image generation based on a Transformer backbone instead of a UNet.
34
+
35
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
36
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
37
+
38
+ Parameters:
39
+ transformer ([`Transformer2DModel`]):
40
+ A class conditioned `Transformer2DModel` to denoise the encoded image latents.
41
+ vae ([`AutoencoderKL`]):
42
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
43
+ scheduler ([`DDIMScheduler`]):
44
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
45
+ """
46
+
47
+ model_cpu_offload_seq = "transformer->vae"
48
+
49
+ def __init__(
50
+ self,
51
+ transformer: Transformer2DModel,
52
+ vae: AutoencoderKL,
53
+ scheduler: KarrasDiffusionSchedulers,
54
+ id2label: Optional[Dict[int, str]] = None,
55
+ ):
56
+ super().__init__()
57
+ self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
58
+
59
+ # create an ImageNet label -> class id dictionary for easier use
60
+ self.labels = {}
61
+ if id2label is not None:
62
+ for key, value in id2label.items():
63
+ for label in value.split(","):
64
+ self.labels[label.lstrip().rstrip()] = int(key)
65
+ self.labels = dict(sorted(self.labels.items()))
66
+
67
+ def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
68
+ r"""
69
+
70
+ Map label strings from ImageNet to corresponding class ids.
71
+
72
+ Parameters:
73
+ label (`str` or `list` of `str`):
74
+ Label strings to be mapped to class ids.
75
+
76
+ Returns:
77
+ `list` of `int`:
78
+ Class ids to be processed by pipeline.
79
+ """
80
+
81
+ if not isinstance(label, list):
82
+ label = [label]
83
+
84
+ for l in label:
85
+ if l not in self.labels:
86
+ raise ValueError(
87
+ f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
88
+ )
89
+
90
+ return [self.labels[l] for l in label]
91
+
92
+ @torch.no_grad()
93
+ def __call__(
94
+ self,
95
+ class_labels: List[int],
96
+ guidance_scale: float = 4.0,
97
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
98
+ num_inference_steps: int = 50,
99
+ output_type: Optional[str] = "pil",
100
+ return_dict: bool = True,
101
+ ) -> Union[ImagePipelineOutput, Tuple]:
102
+ r"""
103
+ The call function to the pipeline for generation.
104
+
105
+ Args:
106
+ class_labels (List[int]):
107
+ List of ImageNet class labels for the images to be generated.
108
+ guidance_scale (`float`, *optional*, defaults to 4.0):
109
+ A higher guidance scale value encourages the model to generate images closely linked to the text
110
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
111
+ generator (`torch.Generator`, *optional*):
112
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
113
+ generation deterministic.
114
+ num_inference_steps (`int`, *optional*, defaults to 50):
115
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
116
+ expense of slower inference.
117
+ output_type (`str`, *optional*, defaults to `"pil"`):
118
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
119
+ return_dict (`bool`, *optional*, defaults to `True`):
120
+ Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
121
+
122
+ Examples:
123
+
124
+ ```py
125
+ >>> from diffusers import DiTPipeline, DPMSolverMultistepScheduler
126
+ >>> import torch
127
+
128
+ >>> pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
129
+ >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
130
+ >>> pipe = pipe.to("cuda")
131
+
132
+ >>> # pick words from Imagenet class labels
133
+ >>> pipe.labels # to print all available words
134
+
135
+ >>> # pick words that exist in ImageNet
136
+ >>> words = ["white shark", "umbrella"]
137
+
138
+ >>> class_ids = pipe.get_label_ids(words)
139
+
140
+ >>> generator = torch.manual_seed(33)
141
+ >>> output = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator)
142
+
143
+ >>> image = output.images[0] # label 'white shark'
144
+ ```
145
+
146
+ Returns:
147
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
148
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
149
+ returned where the first element is a list with the generated images.
150
+ """
151
+
152
+ batch_size = len(class_labels)
153
+ latent_size = self.transformer.config.sample_size
154
+ latent_channels = self.transformer.config.in_channels
155
+
156
+ latents = randn_tensor(
157
+ shape=(batch_size, latent_channels, latent_size, latent_size),
158
+ generator=generator,
159
+ device=self._execution_device,
160
+ dtype=self.transformer.dtype,
161
+ )
162
+ latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
163
+
164
+ class_labels = torch.tensor(class_labels, device=self._execution_device).reshape(-1)
165
+ class_null = torch.tensor([1000] * batch_size, device=self._execution_device)
166
+ class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
167
+
168
+ # set step values
169
+ self.scheduler.set_timesteps(num_inference_steps)
170
+ for t in self.progress_bar(self.scheduler.timesteps):
171
+ if guidance_scale > 1:
172
+ half = latent_model_input[: len(latent_model_input) // 2]
173
+ latent_model_input = torch.cat([half, half], dim=0)
174
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
175
+
176
+ timesteps = t
177
+ if not torch.is_tensor(timesteps):
178
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
179
+ # This would be a good case for the `match` statement (Python 3.10+)
180
+ is_mps = latent_model_input.device.type == "mps"
181
+ if isinstance(timesteps, float):
182
+ dtype = torch.float32 if is_mps else torch.float64
183
+ else:
184
+ dtype = torch.int32 if is_mps else torch.int64
185
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
186
+ elif len(timesteps.shape) == 0:
187
+ timesteps = timesteps[None].to(latent_model_input.device)
188
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
189
+ timesteps = timesteps.expand(latent_model_input.shape[0])
190
+ # predict noise model_output
191
+ noise_pred = self.transformer(
192
+ latent_model_input, timestep=timesteps, class_labels=class_labels_input
193
+ ).sample
194
+
195
+ # perform guidance
196
+ if guidance_scale > 1:
197
+ eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
198
+ cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
199
+
200
+ half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
201
+ eps = torch.cat([half_eps, half_eps], dim=0)
202
+
203
+ noise_pred = torch.cat([eps, rest], dim=1)
204
+
205
+ # learned sigma
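+ # the transformer predicts noise and a learned variance stacked along the channel dim; only the noise half is passed to the scheduler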
206
+ if self.transformer.config.out_channels // 2 == latent_channels:
207
+ model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
208
+ else:
209
+ model_output = noise_pred
210
+
211
+ # compute previous image: x_t -> x_t-1
212
+ latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
213
+
214
+ if guidance_scale > 1:
215
+ latents, _ = latent_model_input.chunk(2, dim=0)
216
+ else:
217
+ latents = latent_model_input
218
+
219
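+ # undo the scaling factor applied to the latents before decoding them back to pixel space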
+ latents = 1 / self.vae.config.scaling_factor * latents
220
+ samples = self.vae.decode(latents).sample
221
+
222
+ samples = (samples / 2 + 0.5).clamp(0, 1)
223
+
224
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
225
+ samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
226
+
227
+ if output_type == "pil":
228
+ samples = self.numpy_to_pil(samples)
229
+
230
+ if not return_dict:
231
+ return (samples,)
232
+
233
+ return ImagePipelineOutput(images=samples)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/pipeline_kandinsky_combined.cpython-310.pyc ADDED
Binary file (31.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky/__pycache__/text_encoder.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/kandinsky3/convert_kandinsky3_unet.py ADDED
@@ -0,0 +1,98 @@
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import fnmatch
4
+
5
+ from safetensors.torch import load_file
6
+
7
+ from diffusers import Kandinsky3UNet
8
+
9
+
10
+ MAPPING = {
11
+ "to_time_embed.1": "time_embedding.linear_1",
12
+ "to_time_embed.3": "time_embedding.linear_2",
13
+ "in_layer": "conv_in",
14
+ "out_layer.0": "conv_norm_out",
15
+ "out_layer.2": "conv_out",
16
+ "down_samples": "down_blocks",
17
+ "up_samples": "up_blocks",
18
+ "projection_lin": "encoder_hid_proj.projection_linear",
19
+ "projection_ln": "encoder_hid_proj.projection_norm",
20
+ "feature_pooling": "add_time_condition",
21
+ "to_query": "to_q",
22
+ "to_key": "to_k",
23
+ "to_value": "to_v",
24
+ "output_layer": "to_out.0",
25
+ "self_attention_block": "attentions.0",
26
+ }
27
+
28
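+ # "*" in these patterns stands for the block index parsed from the original key; a tuple value additionally offsets that index in the new key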
+ DYNAMIC_MAP = {
29
+ "resnet_attn_blocks.*.0": "resnets_in.*",
30
+ "resnet_attn_blocks.*.1": ("attentions.*", 1),
31
+ "resnet_attn_blocks.*.2": "resnets_out.*",
32
+ }
33
+ # MAPPING = {}
34
+
35
+
36
+ def convert_state_dict(unet_state_dict):
37
+ """
38
+ Convert the state dict of a U-Net model to match the key format expected by Kandinsky3UNet model.
39
+ Args:
40
+ unet_state_dict (dict): The state dict of the original U-Net model to convert.
42
+
43
+ Returns:
44
+ dict: The converted state dictionary.
45
+ """
46
+ # Example of renaming logic (this will vary based on your model's architecture)
47
+ converted_state_dict = {}
48
+ for key in unet_state_dict:
49
+ new_key = key
50
+ for pattern, new_pattern in MAPPING.items():
51
+ new_key = new_key.replace(pattern, new_pattern)
52
+
53
+ for dyn_pattern, dyn_new_pattern in DYNAMIC_MAP.items():
54
+ has_matched = False
55
+ if fnmatch.fnmatch(new_key, f"*.{dyn_pattern}.*") and not has_matched:
56
+ star = int(new_key.split(dyn_pattern.split(".")[0])[-1].split(".")[1])
57
+
58
+ if isinstance(dyn_new_pattern, tuple):
59
+ new_star = star + dyn_new_pattern[-1]
60
+ dyn_new_pattern = dyn_new_pattern[0]
61
+ else:
62
+ new_star = star
63
+
64
+ pattern = dyn_pattern.replace("*", str(star))
65
+ new_pattern = dyn_new_pattern.replace("*", str(new_star))
66
+
67
+ new_key = new_key.replace(pattern, new_pattern)
68
+ has_matched = True
69
+
70
+ converted_state_dict[new_key] = unet_state_dict[key]
71
+
72
+ return converted_state_dict
73
+
74
+
75
+ def main(model_path, output_path):
76
+ # Load your original U-Net model
77
+ unet_state_dict = load_file(model_path)
78
+
79
+ # Initialize your Kandinsky3UNet model
80
+ config = {}
81
+
82
+ # Convert the state dict
83
+ converted_state_dict = convert_state_dict(unet_state_dict)
84
+
85
+ unet = Kandinsky3UNet(config)
86
+ unet.load_state_dict(converted_state_dict)
87
+
88
+ unet.save_pretrained(output_path)
89
+ print(f"Converted model saved to {output_path}")
90
+
91
+
92
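+ # example invocation (hypothetical paths): python convert_kandinsky3_unet.py --model_path kandinsky3.safetensors --output_path ./kandinsky3_unet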
+ if __name__ == "__main__":
93
+ parser = argparse.ArgumentParser(description="Convert U-Net PyTorch model to Kandinsky3UNet format")
94
+ parser.add_argument("--model_path", type=str, required=True, help="Path to the original U-Net PyTorch model")
95
+ parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted model")
96
+
97
+ args = parser.parse_args()
98
+ main(args.model_path, args.output_path)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__init__.py ADDED
@@ -0,0 +1,50 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {}
15
+
16
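+ # pipeline classes are registered for lazy import; if torch or transformers is unavailable, dummy placeholder objects are exposed instead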
+ try:
17
+ if not (is_transformers_available() and is_torch_available()):
18
+ raise OptionalDependencyNotAvailable()
19
+ except OptionalDependencyNotAvailable:
20
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
21
+
22
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
23
+ else:
24
+ _import_structure["pipeline_latent_diffusion"] = ["LDMBertModel", "LDMTextToImagePipeline"]
25
+ _import_structure["pipeline_latent_diffusion_superresolution"] = ["LDMSuperResolutionPipeline"]
26
+
27
+
28
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
29
+ try:
30
+ if not (is_transformers_available() and is_torch_available()):
31
+ raise OptionalDependencyNotAvailable()
32
+
33
+ except OptionalDependencyNotAvailable:
34
+ from ...utils.dummy_torch_and_transformers_objects import *
35
+ else:
36
+ from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline
37
+ from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline
38
+
39
+ else:
40
+ import sys
41
+
42
+ sys.modules[__name__] = _LazyModule(
43
+ __name__,
44
+ globals()["__file__"],
45
+ _import_structure,
46
+ module_spec=__spec__,
47
+ )
48
+
49
+ for name, value in _dummy_objects.items():
50
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/__pycache__/pipeline_latent_diffusion_superresolution.cpython-310.pyc ADDED
Binary file (7.32 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py ADDED
@@ -0,0 +1,746 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.utils.checkpoint
21
+ from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer
22
+ from transformers.activations import ACT2FN
23
+ from transformers.modeling_outputs import BaseModelOutput
24
+ from transformers.utils import logging
25
+
26
+ from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel
27
+ from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
28
+ from ...utils.torch_utils import randn_tensor
29
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
30
+
31
+
32
+ class LDMTextToImagePipeline(DiffusionPipeline):
33
+ r"""
34
+ Pipeline for text-to-image generation using latent diffusion.
35
+
36
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
37
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
38
+
39
+ Parameters:
40
+ vqvae ([`VQModel`]):
41
+ Vector-quantized (VQ) model to encode and decode images to and from latent representations.
42
+ bert ([`LDMBertModel`]):
43
+ Text-encoder model based on [`~transformers.BERT`].
44
+ tokenizer ([`~transformers.BertTokenizer`]):
45
+ A `BertTokenizer` to tokenize text.
46
+ unet ([`UNet2DConditionModel`]):
47
+ A `UNet2DConditionModel` to denoise the encoded image latents.
48
+ scheduler ([`SchedulerMixin`]):
49
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
50
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
51
+ """
52
+
53
+ model_cpu_offload_seq = "bert->unet->vqvae"
54
+
55
+ def __init__(
56
+ self,
57
+ vqvae: Union[VQModel, AutoencoderKL],
58
+ bert: PreTrainedModel,
59
+ tokenizer: PreTrainedTokenizer,
60
+ unet: Union[UNet2DModel, UNet2DConditionModel],
61
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
62
+ ):
63
+ super().__init__()
64
+ self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
65
+ self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
66
+
67
+ @torch.no_grad()
68
+ def __call__(
69
+ self,
70
+ prompt: Union[str, List[str]],
71
+ height: Optional[int] = None,
72
+ width: Optional[int] = None,
73
+ num_inference_steps: Optional[int] = 50,
74
+ guidance_scale: Optional[float] = 1.0,
75
+ eta: Optional[float] = 0.0,
76
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
77
+ latents: Optional[torch.FloatTensor] = None,
78
+ output_type: Optional[str] = "pil",
79
+ return_dict: bool = True,
80
+ **kwargs,
81
+ ) -> Union[Tuple, ImagePipelineOutput]:
82
+ r"""
83
+ The call function to the pipeline for generation.
84
+
85
+ Args:
86
+ prompt (`str` or `List[str]`):
87
+ The prompt or prompts to guide the image generation.
88
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
89
+ The height in pixels of the generated image.
90
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
91
+ The width in pixels of the generated image.
92
+ num_inference_steps (`int`, *optional*, defaults to 50):
93
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
94
+ expense of slower inference.
95
+ guidance_scale (`float`, *optional*, defaults to 1.0):
96
+ A higher guidance scale value encourages the model to generate images closely linked to the text
97
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
98
+ generator (`torch.Generator`, *optional*):
99
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
100
+ generation deterministic.
101
+ latents (`torch.FloatTensor`, *optional*):
102
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
103
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
104
+ tensor is generated by sampling using the supplied random `generator`.
105
+ output_type (`str`, *optional*, defaults to `"pil"`):
106
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
107
+ return_dict (`bool`, *optional*, defaults to `True`):
108
+ Whether or not to return an [`ImagePipelineOutput`] instead of a plain tuple.
109
+
110
+ Example:
111
+
112
+ ```py
113
+ >>> from diffusers import DiffusionPipeline
114
+
115
+ >>> # load model and scheduler
116
+ >>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
117
+
118
+ >>> # run pipeline in inference (sample random noise and denoise)
119
+ >>> prompt = "A painting of a squirrel eating a burger"
120
+ >>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images
121
+
122
+ >>> # save images
123
+ >>> for idx, image in enumerate(images):
124
+ ... image.save(f"squirrel-{idx}.png")
125
+ ```
126
+
127
+ Returns:
128
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
129
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
130
+ returned where the first element is a list with the generated images.
131
+ """
132
+ # 0. Default height and width to unet
133
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
134
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
135
+
136
+ if isinstance(prompt, str):
137
+ batch_size = 1
138
+ elif isinstance(prompt, list):
139
+ batch_size = len(prompt)
140
+ else:
141
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
142
+
143
+ if height % 8 != 0 or width % 8 != 0:
144
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
145
+
146
+ # get unconditional embeddings for classifier free guidance
147
+ if guidance_scale != 1.0:
148
+ uncond_input = self.tokenizer(
149
+ [""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt"
150
+ )
151
+ negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0]
152
+
153
+ # get prompt text embeddings
154
+ text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt")
155
+ prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0]
156
+
157
+ # get the initial random noise unless the user supplied it
158
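+ # latents live in the first-stage model's latent space, which is spatially downsampled by a factor of 8 here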
+ latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
159
+ if isinstance(generator, list) and len(generator) != batch_size:
160
+ raise ValueError(
161
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
162
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
163
+ )
164
+
165
+ if latents is None:
166
+ latents = randn_tensor(
167
+ latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype
168
+ )
169
+ else:
170
+ if latents.shape != latents_shape:
171
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
172
+ latents = latents.to(self._execution_device)
173
+
174
+ self.scheduler.set_timesteps(num_inference_steps)
175
+
176
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
177
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
178
+
179
+ extra_kwargs = {}
180
+ if accepts_eta:
181
+ extra_kwargs["eta"] = eta
182
+
183
+ for t in self.progress_bar(self.scheduler.timesteps):
184
+ if guidance_scale == 1.0:
185
+ # guidance_scale of 1 means no guidance
186
+ latents_input = latents
187
+ context = prompt_embeds
188
+ else:
189
+ # For classifier free guidance, we need to do two forward passes.
190
+ # Here we concatenate the unconditional and text embeddings into a single batch
191
+ # to avoid doing two forward passes
192
+ latents_input = torch.cat([latents] * 2)
193
+ context = torch.cat([negative_prompt_embeds, prompt_embeds])
194
+
195
+ # predict the noise residual
196
+ noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample
197
+ # perform guidance
198
+ if guidance_scale != 1.0:
199
+ noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
200
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
201
+
202
+ # compute the previous noisy sample x_t -> x_t-1
203
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
204
+
205
+ # scale and decode the image latents with vae
206
+ latents = 1 / self.vqvae.config.scaling_factor * latents
207
+ image = self.vqvae.decode(latents).sample
208
+
209
+ image = (image / 2 + 0.5).clamp(0, 1)
210
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
211
+ if output_type == "pil":
212
+ image = self.numpy_to_pil(image)
213
+
214
+ if not return_dict:
215
+ return (image,)
216
+
217
+ return ImagePipelineOutput(images=image)
218
+
219
+
220
+ ################################################################################
221
+ # Code for the text transformer model
222
+ ################################################################################
223
+ """ PyTorch LDMBERT model."""
224
+
225
+
226
+ logger = logging.get_logger(__name__)
227
+
228
+ LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
229
+ "ldm-bert",
230
+ # See all LDMBert models at https://huggingface.co/models?filter=ldmbert
231
+ ]
232
+
233
+
234
+ LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
235
+ "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json",
236
+ }
237
+
238
+
239
+ """ LDMBERT model configuration"""
240
+
241
+
242
+ class LDMBertConfig(PretrainedConfig):
243
+ model_type = "ldmbert"
244
+ keys_to_ignore_at_inference = ["past_key_values"]
245
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
246
+
247
+ def __init__(
248
+ self,
249
+ vocab_size=30522,
250
+ max_position_embeddings=77,
251
+ encoder_layers=32,
252
+ encoder_ffn_dim=5120,
253
+ encoder_attention_heads=8,
254
+ head_dim=64,
255
+ encoder_layerdrop=0.0,
256
+ activation_function="gelu",
257
+ d_model=1280,
258
+ dropout=0.1,
259
+ attention_dropout=0.0,
260
+ activation_dropout=0.0,
261
+ init_std=0.02,
262
+ classifier_dropout=0.0,
263
+ scale_embedding=False,
264
+ use_cache=True,
265
+ pad_token_id=0,
266
+ **kwargs,
267
+ ):
268
+ self.vocab_size = vocab_size
269
+ self.max_position_embeddings = max_position_embeddings
270
+ self.d_model = d_model
271
+ self.encoder_ffn_dim = encoder_ffn_dim
272
+ self.encoder_layers = encoder_layers
273
+ self.encoder_attention_heads = encoder_attention_heads
274
+ self.head_dim = head_dim
275
+ self.dropout = dropout
276
+ self.attention_dropout = attention_dropout
277
+ self.activation_dropout = activation_dropout
278
+ self.activation_function = activation_function
279
+ self.init_std = init_std
280
+ self.encoder_layerdrop = encoder_layerdrop
281
+ self.classifier_dropout = classifier_dropout
282
+ self.use_cache = use_cache
283
+ self.num_hidden_layers = encoder_layers
284
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
285
+
286
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
287
+
288
+
289
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
290
+ """
291
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
292
+ """
293
+ bsz, src_len = mask.size()
294
+ tgt_len = tgt_len if tgt_len is not None else src_len
295
+
296
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
297
+
298
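+ # invert the mask so padded positions become 1, then fill them with the dtype's minimum so softmax assigns them ~zero weight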
+ inverted_mask = 1.0 - expanded_mask
299
+
300
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
301
+
302
+
303
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert
304
+ class LDMBertAttention(nn.Module):
305
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
306
+
307
+ def __init__(
308
+ self,
309
+ embed_dim: int,
310
+ num_heads: int,
311
+ head_dim: int,
312
+ dropout: float = 0.0,
313
+ is_decoder: bool = False,
314
+ bias: bool = False,
315
+ ):
316
+ super().__init__()
317
+ self.embed_dim = embed_dim
318
+ self.num_heads = num_heads
319
+ self.dropout = dropout
320
+ self.head_dim = head_dim
321
+ self.inner_dim = head_dim * num_heads
322
+
323
+ self.scaling = self.head_dim**-0.5
324
+ self.is_decoder = is_decoder
325
+
326
+ self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
327
+ self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
328
+ self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
329
+ self.out_proj = nn.Linear(self.inner_dim, embed_dim)
330
+
331
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
332
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
333
+
334
+ def forward(
335
+ self,
336
+ hidden_states: torch.Tensor,
337
+ key_value_states: Optional[torch.Tensor] = None,
338
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
339
+ attention_mask: Optional[torch.Tensor] = None,
340
+ layer_head_mask: Optional[torch.Tensor] = None,
341
+ output_attentions: bool = False,
342
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
343
+ """Input shape: Batch x Time x Channel"""
344
+
345
+ # if key_value_states are provided this layer is used as a cross-attention layer
346
+ # for the decoder
347
+ is_cross_attention = key_value_states is not None
348
+
349
+ bsz, tgt_len, _ = hidden_states.size()
350
+
351
+ # get query proj
352
+ query_states = self.q_proj(hidden_states) * self.scaling
353
+ # get key, value proj
354
+ if is_cross_attention and past_key_value is not None:
355
+ # reuse k,v, cross_attentions
356
+ key_states = past_key_value[0]
357
+ value_states = past_key_value[1]
358
+ elif is_cross_attention:
359
+ # cross_attentions
360
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
361
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
362
+ elif past_key_value is not None:
363
+ # reuse k, v, self_attention
364
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
365
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
366
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
367
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
368
+ else:
369
+ # self_attention
370
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
371
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
372
+
373
+ if self.is_decoder:
374
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
375
+ # Further calls to cross_attention layer can then reuse all cross-attention
376
+ # key/value_states (first "if" case)
377
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
378
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
379
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
380
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
381
+ past_key_value = (key_states, value_states)
382
+
383
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
384
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
385
+ key_states = key_states.view(*proj_shape)
386
+ value_states = value_states.view(*proj_shape)
387
+
388
+ src_len = key_states.size(1)
389
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
390
+
391
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
392
+ raise ValueError(
393
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
394
+ f" {attn_weights.size()}"
395
+ )
396
+
397
+ if attention_mask is not None:
398
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
399
+ raise ValueError(
400
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
401
+ )
402
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
403
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
404
+
405
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
406
+
407
+ if layer_head_mask is not None:
408
+ if layer_head_mask.size() != (self.num_heads,):
409
+ raise ValueError(
410
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
411
+ f" {layer_head_mask.size()}"
412
+ )
413
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
414
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
415
+
416
+ if output_attentions:
417
+ # this operation is a bit awkward, but it's required to
418
+ # make sure that attn_weights keeps its gradient.
419
+ # In order to do so, attn_weights have to be reshaped
420
+ # twice and have to be reused in the following
421
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
422
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
423
+ else:
424
+ attn_weights_reshaped = None
425
+
426
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
427
+
428
+ attn_output = torch.bmm(attn_probs, value_states)
429
+
430
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
431
+ raise ValueError(
432
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
433
+ f" {attn_output.size()}"
434
+ )
435
+
436
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
437
+ attn_output = attn_output.transpose(1, 2)
438
+
439
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
440
+ # partitioned across GPUs when using tensor-parallelism.
441
+ attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim)
442
+
443
+ attn_output = self.out_proj(attn_output)
444
+
445
+ return attn_output, attn_weights_reshaped, past_key_value
446
+
447
+
448
+ class LDMBertEncoderLayer(nn.Module):
449
+ def __init__(self, config: LDMBertConfig):
450
+ super().__init__()
451
+ self.embed_dim = config.d_model
452
+ self.self_attn = LDMBertAttention(
453
+ embed_dim=self.embed_dim,
454
+ num_heads=config.encoder_attention_heads,
455
+ head_dim=config.head_dim,
456
+ dropout=config.attention_dropout,
457
+ )
458
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
459
+ self.dropout = config.dropout
460
+ self.activation_fn = ACT2FN[config.activation_function]
461
+ self.activation_dropout = config.activation_dropout
462
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
463
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
464
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
465
+
466
+ def forward(
467
+ self,
468
+ hidden_states: torch.FloatTensor,
469
+ attention_mask: torch.FloatTensor,
470
+ layer_head_mask: torch.FloatTensor,
471
+ output_attentions: Optional[bool] = False,
472
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
473
+ """
474
+ Args:
475
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
476
+ attention_mask (`torch.FloatTensor`): attention mask of size
477
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
478
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
479
+ `(encoder_attention_heads,)`.
480
+ output_attentions (`bool`, *optional*):
481
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
482
+ returned tensors for more detail.
483
+ """
484
+ residual = hidden_states
485
+ hidden_states = self.self_attn_layer_norm(hidden_states)
486
+ hidden_states, attn_weights, _ = self.self_attn(
487
+ hidden_states=hidden_states,
488
+ attention_mask=attention_mask,
489
+ layer_head_mask=layer_head_mask,
490
+ output_attentions=output_attentions,
491
+ )
492
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
493
+ hidden_states = residual + hidden_states
494
+
495
+ residual = hidden_states
496
+ hidden_states = self.final_layer_norm(hidden_states)
497
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
498
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
499
+ hidden_states = self.fc2(hidden_states)
500
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
501
+ hidden_states = residual + hidden_states
502
+
503
+ if hidden_states.dtype == torch.float16 and (
504
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
505
+ ):
506
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
507
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
508
+
509
+ outputs = (hidden_states,)
510
+
511
+ if output_attentions:
512
+ outputs += (attn_weights,)
513
+
514
+ return outputs
515
+
516
+
517
+ # Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert
518
+ class LDMBertPreTrainedModel(PreTrainedModel):
519
+ config_class = LDMBertConfig
520
+ base_model_prefix = "model"
521
+ _supports_gradient_checkpointing = True
522
+ _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"]
523
+
524
+ def _init_weights(self, module):
525
+ std = self.config.init_std
526
+ if isinstance(module, nn.Linear):
527
+ module.weight.data.normal_(mean=0.0, std=std)
528
+ if module.bias is not None:
529
+ module.bias.data.zero_()
530
+ elif isinstance(module, nn.Embedding):
531
+ module.weight.data.normal_(mean=0.0, std=std)
532
+ if module.padding_idx is not None:
533
+ module.weight.data[module.padding_idx].zero_()
534
+
535
+ def _set_gradient_checkpointing(self, module, value=False):
536
+ if isinstance(module, (LDMBertEncoder,)):
537
+ module.gradient_checkpointing = value
538
+
539
+ @property
540
+ def dummy_inputs(self):
541
+ pad_token = self.config.pad_token_id
542
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
543
+ dummy_inputs = {
544
+ "attention_mask": input_ids.ne(pad_token),
545
+ "input_ids": input_ids,
546
+ }
547
+ return dummy_inputs
548
+
549
+
550
+ class LDMBertEncoder(LDMBertPreTrainedModel):
551
+ """
552
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
553
+ [`LDMBertEncoderLayer`].
554
+
555
+ Args:
556
+ config: LDMBertConfig
557
+ embed_tokens (nn.Embedding): output embedding
558
+ """
559
+
560
+ def __init__(self, config: LDMBertConfig):
561
+ super().__init__(config)
562
+
563
+ self.dropout = config.dropout
564
+
565
+ embed_dim = config.d_model
566
+ self.padding_idx = config.pad_token_id
567
+ self.max_source_positions = config.max_position_embeddings
568
+
569
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim)
570
+ self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim)
571
+ self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)])
572
+ self.layer_norm = nn.LayerNorm(embed_dim)
573
+
574
+ self.gradient_checkpointing = False
575
+ # Initialize weights and apply final processing
576
+ self.post_init()
577
+
578
+ def get_input_embeddings(self):
579
+ return self.embed_tokens
580
+
581
+ def set_input_embeddings(self, value):
582
+ self.embed_tokens = value
583
+
584
+ def forward(
585
+ self,
586
+ input_ids: torch.LongTensor = None,
587
+ attention_mask: Optional[torch.Tensor] = None,
588
+ position_ids: Optional[torch.LongTensor] = None,
589
+ head_mask: Optional[torch.Tensor] = None,
590
+ inputs_embeds: Optional[torch.FloatTensor] = None,
591
+ output_attentions: Optional[bool] = None,
592
+ output_hidden_states: Optional[bool] = None,
593
+ return_dict: Optional[bool] = None,
594
+ ) -> Union[Tuple, BaseModelOutput]:
595
+ r"""
596
+ Args:
597
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
598
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
599
+ provide it.
600
+
601
+ Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
602
+ [`PreTrainedTokenizer.__call__`] for details.
603
+
604
+ [What are input IDs?](../glossary#input-ids)
605
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
606
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
607
+
608
+ - 1 for tokens that are **not masked**,
609
+ - 0 for tokens that are **masked**.
610
+
611
+ [What are attention masks?](../glossary#attention-mask)
612
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
613
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
614
+
615
+ - 1 indicates the head is **not masked**,
616
+ - 0 indicates the head is **masked**.
617
+
618
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
619
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
620
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
621
+ than the model's internal embedding lookup matrix.
622
+ output_attentions (`bool`, *optional*):
623
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
624
+ returned tensors for more detail.
625
+ output_hidden_states (`bool`, *optional*):
626
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
627
+ for more detail.
628
+ return_dict (`bool`, *optional*):
629
+ Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple.
630
+ """
631
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
632
+ output_hidden_states = (
633
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
634
+ )
635
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
636
+
637
+ # retrieve input_ids and inputs_embeds
638
+ if input_ids is not None and inputs_embeds is not None:
639
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
640
+ elif input_ids is not None:
641
+ input_shape = input_ids.size()
642
+ input_ids = input_ids.view(-1, input_shape[-1])
643
+ elif inputs_embeds is not None:
644
+ input_shape = inputs_embeds.size()[:-1]
645
+ else:
646
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
647
+
648
+ if inputs_embeds is None:
649
+ inputs_embeds = self.embed_tokens(input_ids)
650
+
651
+ seq_len = input_shape[1]
652
+ if position_ids is None:
653
+ position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1))
654
+ embed_pos = self.embed_positions(position_ids)
655
+
656
+ hidden_states = inputs_embeds + embed_pos
657
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
658
+
659
+ # expand attention_mask
660
+ if attention_mask is not None:
661
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
662
+ attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
663
+
664
+ encoder_states = () if output_hidden_states else None
665
+ all_attentions = () if output_attentions else None
666
+
667
+ # check if head_mask has a correct number of layers specified if desired
668
+ if head_mask is not None:
669
+ if head_mask.size()[0] != (len(self.layers)):
670
+ raise ValueError(
671
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
672
+ f" {head_mask.size()[0]}."
673
+ )
674
+
675
+ for idx, encoder_layer in enumerate(self.layers):
676
+ if output_hidden_states:
677
+ encoder_states = encoder_states + (hidden_states,)
678
+ if self.gradient_checkpointing and self.training:
679
+
680
+ def create_custom_forward(module):
681
+ def custom_forward(*inputs):
682
+ return module(*inputs, output_attentions)
683
+
684
+ return custom_forward
685
+
686
+ layer_outputs = torch.utils.checkpoint.checkpoint(
687
+ create_custom_forward(encoder_layer),
688
+ hidden_states,
689
+ attention_mask,
690
+ (head_mask[idx] if head_mask is not None else None),
691
+ )
692
+ else:
693
+ layer_outputs = encoder_layer(
694
+ hidden_states,
695
+ attention_mask,
696
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
697
+ output_attentions=output_attentions,
698
+ )
699
+
700
+ hidden_states = layer_outputs[0]
701
+
702
+ if output_attentions:
703
+ all_attentions = all_attentions + (layer_outputs[1],)
704
+
705
+ hidden_states = self.layer_norm(hidden_states)
706
+
707
+ if output_hidden_states:
708
+ encoder_states = encoder_states + (hidden_states,)
709
+
710
+ if not return_dict:
711
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
712
+ return BaseModelOutput(
713
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
714
+ )
715
+
716
+
717
+ class LDMBertModel(LDMBertPreTrainedModel):
718
+ _no_split_modules = []
719
+
720
+ def __init__(self, config: LDMBertConfig):
721
+ super().__init__(config)
722
+ self.model = LDMBertEncoder(config)
723
+ self.to_logits = nn.Linear(config.hidden_size, config.vocab_size)
724
+
725
+ def forward(
726
+ self,
727
+ input_ids=None,
728
+ attention_mask=None,
729
+ position_ids=None,
730
+ head_mask=None,
731
+ inputs_embeds=None,
732
+ output_attentions=None,
733
+ output_hidden_states=None,
734
+ return_dict=None,
735
+ ):
736
+ outputs = self.model(
737
+ input_ids,
738
+ attention_mask=attention_mask,
739
+ position_ids=position_ids,
740
+ head_mask=head_mask,
741
+ inputs_embeds=inputs_embeds,
742
+ output_attentions=output_attentions,
743
+ output_hidden_states=output_hidden_states,
744
+ return_dict=return_dict,
745
+ )
746
+ return outputs
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py ADDED
@@ -0,0 +1,189 @@
1
+ import inspect
2
+ from typing import List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+ import torch.utils.checkpoint
8
+
9
+ from ...models import UNet2DModel, VQModel
10
+ from ...schedulers import (
11
+ DDIMScheduler,
12
+ DPMSolverMultistepScheduler,
13
+ EulerAncestralDiscreteScheduler,
14
+ EulerDiscreteScheduler,
15
+ LMSDiscreteScheduler,
16
+ PNDMScheduler,
17
+ )
18
+ from ...utils import PIL_INTERPOLATION
19
+ from ...utils.torch_utils import randn_tensor
20
+ from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
21
+
22
+
23
+ def preprocess(image):
24
+ w, h = image.size
25
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
26
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
27
+ image = np.array(image).astype(np.float32) / 255.0
28
+ image = image[None].transpose(0, 3, 1, 2)
29
+ image = torch.from_numpy(image)
30
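+ # rescale pixel values from [0, 1] to [-1, 1], the range the first-stage model expects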
+ return 2.0 * image - 1.0
31
+
32
+
33
+ class LDMSuperResolutionPipeline(DiffusionPipeline):
34
+ r"""
35
+ A pipeline for image super-resolution using latent diffusion.
36
+
37
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
38
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
39
+
40
+ Parameters:
41
+ vqvae ([`VQModel`]):
42
+ Vector-quantized (VQ) model to encode and decode images to and from latent representations.
43
+ unet ([`UNet2DModel`]):
44
+ A `UNet2DModel` to denoise the encoded image.
45
+ scheduler ([`SchedulerMixin`]):
46
+ A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
47
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`],
48
+ [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`].
49
+ """
50
+
51
+ def __init__(
52
+ self,
53
+ vqvae: VQModel,
54
+ unet: UNet2DModel,
55
+ scheduler: Union[
56
+ DDIMScheduler,
57
+ PNDMScheduler,
58
+ LMSDiscreteScheduler,
59
+ EulerDiscreteScheduler,
60
+ EulerAncestralDiscreteScheduler,
61
+ DPMSolverMultistepScheduler,
62
+ ],
63
+ ):
64
+ super().__init__()
65
+ self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
66
+
67
+ @torch.no_grad()
68
+ def __call__(
69
+ self,
70
+ image: Union[torch.Tensor, PIL.Image.Image] = None,
71
+ batch_size: Optional[int] = 1,
72
+ num_inference_steps: Optional[int] = 100,
73
+ eta: Optional[float] = 0.0,
74
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
75
+ output_type: Optional[str] = "pil",
76
+ return_dict: bool = True,
77
+ ) -> Union[Tuple, ImagePipelineOutput]:
78
+ r"""
79
+ The call function to the pipeline for generation.
80
+
81
+ Args:
82
+ image (`torch.Tensor` or `PIL.Image.Image`):
83
+ `Image` or tensor representing an image batch to be used as the starting point for the process.
84
+ batch_size (`int`, *optional*, defaults to 1):
85
+ Number of images to generate.
86
+ num_inference_steps (`int`, *optional*, defaults to 100):
87
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
88
+ expense of slower inference.
89
+ eta (`float`, *optional*, defaults to 0.0):
90
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
91
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
92
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
93
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
94
+ generation deterministic.
95
+ output_type (`str`, *optional*, defaults to `"pil"`):
96
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
97
+ return_dict (`bool`, *optional*, defaults to `True`):
98
+ Whether or not to return an [`ImagePipelineOutput`] instead of a plain tuple.
99
+
100
+ Example:
101
+
102
+ ```py
103
+ >>> import requests
104
+ >>> from PIL import Image
105
+ >>> from io import BytesIO
106
+ >>> from diffusers import LDMSuperResolutionPipeline
107
+ >>> import torch
108
+
109
+ >>> # load model and scheduler
110
+ >>> pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
111
+ >>> pipeline = pipeline.to("cuda")
112
+
113
+ >>> # let's download an image
114
+ >>> url = (
115
+ ... "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png"
116
+ ... )
117
+ >>> response = requests.get(url)
118
+ >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB")
119
+ >>> low_res_img = low_res_img.resize((128, 128))
120
+
121
+ >>> # run pipeline in inference (sample random noise and denoise)
122
+ >>> upscaled_image = pipeline(low_res_img, num_inference_steps=100, eta=1).images[0]
123
+ >>> # save image
124
+ >>> upscaled_image.save("ldm_generated_image.png")
125
+ ```
126
+
127
+ Returns:
128
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
129
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
130
+ returned where the first element is a list with the generated images.
131
+ """
132
+ if isinstance(image, PIL.Image.Image):
133
+ batch_size = 1
134
+ elif isinstance(image, torch.Tensor):
135
+ batch_size = image.shape[0]
136
+ else:
137
+ raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
138
+
139
+ if isinstance(image, PIL.Image.Image):
140
+ image = preprocess(image)
141
+
142
+ height, width = image.shape[-2:]
143
+
144
+ # in_channels should be 6: 3 for latents, 3 for low resolution image
145
+ latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
146
+ latents_dtype = next(self.unet.parameters()).dtype
147
+
148
+ latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
149
+
150
+ image = image.to(device=self.device, dtype=latents_dtype)
151
+
152
+ # set timesteps and move to the correct device
153
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
154
+ timesteps_tensor = self.scheduler.timesteps
155
+
156
+ # scale the initial noise by the standard deviation required by the scheduler
157
+ latents = latents * self.scheduler.init_noise_sigma
158
+
159
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
160
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
161
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
162
+ # and should be between [0, 1]
163
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
164
+ extra_kwargs = {}
165
+ if accepts_eta:
166
+ extra_kwargs["eta"] = eta
167
+
168
+ for t in self.progress_bar(timesteps_tensor):
169
+ # concat latents and low resolution image in the channel dimension.
170
+ latents_input = torch.cat([latents, image], dim=1)
171
+ latents_input = self.scheduler.scale_model_input(latents_input, t)
172
+ # predict the noise residual
173
+ noise_pred = self.unet(latents_input, t).sample
174
+ # compute the previous noisy sample x_t -> x_t-1
175
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
176
+
177
+ # decode the image latents with the VQVAE
178
+ image = self.vqvae.decode(latents).sample
179
+ image = torch.clamp(image, -1.0, 1.0)
180
+ image = image / 2 + 0.5
181
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
182
+
183
+ if output_type == "pil":
184
+ image = self.numpy_to_pil(image)
185
+
186
+ if not return_dict:
187
+ return (image,)
188
+
189
+ return ImagePipelineOutput(images=image)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ledits_pp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.31 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/ledits_pp/__pycache__/pipeline_leditspp_stable_diffusion.cpython-310.pyc ADDED
Binary file (47.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py ADDED
@@ -0,0 +1,48 @@
1
+ from typing import TYPE_CHECKING
2
+
3
+ from ...utils import (
4
+ DIFFUSERS_SLOW_IMPORT,
5
+ OptionalDependencyNotAvailable,
6
+ _LazyModule,
7
+ get_objects_from_module,
8
+ is_torch_available,
9
+ is_transformers_available,
10
+ )
11
+
12
+
13
+ _dummy_objects = {}
14
+ _import_structure = {}
15
+
16
+
17
+ try:
18
+ if not (is_transformers_available() and is_torch_available()):
19
+ raise OptionalDependencyNotAvailable()
20
+ except OptionalDependencyNotAvailable:
21
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
22
+
23
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
24
+ else:
25
+ _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"]
26
+
27
+ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
28
+ try:
29
+ if not (is_transformers_available() and is_torch_available()):
30
+ raise OptionalDependencyNotAvailable()
31
+
32
+ except OptionalDependencyNotAvailable:
33
+ from ...utils.dummy_torch_and_transformers_objects import *
34
+ else:
35
+ from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
36
+
37
+ else:
38
+ import sys
39
+
40
+ sys.modules[__name__] = _LazyModule(
41
+ __name__,
42
+ globals()["__file__"],
43
+ _import_structure,
44
+ module_spec=__spec__,
45
+ )
46
+
47
+ for name, value in _dummy_objects.items():
48
+ setattr(sys.modules[__name__], name, value)
evalkit_tf437/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_attend_and_excite/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.11 kB). View file