ZTWHHH committed
Commit 3d0ddeb · verified · 1 Parent(s): 1fa55d5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. openflamingo/lib/python3.10/site-packages/pycocoevalcap/spice/lib/guava-19.0.jar +3 -0
  3. openflamingo/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc +3 -0
  4. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc +0 -0
  5. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc +0 -0
  6. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  7. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py +0 -0
  8. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc +0 -0
  9. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py +23 -0
  10. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py +153 -0
  11. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py +81 -0
  12. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py +279 -0
  13. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  14. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc +0 -0
  15. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc +0 -0
  16. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc +0 -0
  17. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc +0 -0
  18. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc +0 -0
  19. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc +0 -0
  20. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc +0 -0
  21. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
  22. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc +0 -0
  23. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
  24. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc +0 -0
  25. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc +0 -0
  26. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc +0 -0
  27. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc +0 -0
  28. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc +0 -0
  29. openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc +0 -0
  30. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc +0 -0
  31. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc +0 -0
  32. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc +0 -0
  33. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitnet.cpython-310.pyc +0 -0
  34. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc +0 -0
  35. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc +0 -0
  36. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/eetq.cpython-310.pyc +0 -0
  37. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/executorch.cpython-310.pyc +0 -0
  38. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/fbgemm_fp8.cpython-310.pyc +0 -0
  39. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/flash_attention.cpython-310.pyc +0 -0
  40. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/flex_attention.cpython-310.pyc +0 -0
  41. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/fsdp.cpython-310.pyc +0 -0
  42. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/ggml.cpython-310.pyc +0 -0
  43. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/higgs.cpython-310.pyc +0 -0
  44. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc +0 -0
  45. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc +0 -0
  46. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/sdpa_attention.cpython-310.pyc +0 -0
  47. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/tiktoken.cpython-310.pyc +0 -0
  48. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc +0 -0
  49. phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/vptq.cpython-310.pyc +0 -0
  50. phi4/lib/python3.10/site-packages/transformers/quantizers/__init__.py +15 -0
.gitattributes CHANGED
@@ -742,3 +742,5 @@ phi4/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
  phi4/lib/libatomic.so.1.2.0 filter=lfs diff=lfs merge=lfs -text
  phi4/lib/libncurses.a filter=lfs diff=lfs merge=lfs -text
  phi4/lib/libtinfow.so.6.4 filter=lfs diff=lfs merge=lfs -text
+ openflamingo/lib/python3.10/site-packages/pycocoevalcap/spice/lib/guava-19.0.jar filter=lfs diff=lfs merge=lfs -text
+ openflamingo/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
openflamingo/lib/python3.10/site-packages/pycocoevalcap/spice/lib/guava-19.0.jar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58d4cc2e05ebb012bbac568b032f75623be1cb6fb096f3c60c72a86f7f057de4
+ size 2308517
openflamingo/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d650439881c0406a52ed7dabfa91ad04ec444f5048960f775c34c622cdaaff80
+ size 150242
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (5.64 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_jit.cpython-310.pyc ADDED
Binary file (13.1 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (21.4 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__init__.py ADDED
File without changes
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/_pt2e/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.05 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py ADDED
@@ -0,0 +1,23 @@
+ from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, DTypeWithConstraints, ObservationType
+ from .fbgemm import get_fbgemm_backend_config
+ from .native import get_native_backend_config, get_native_backend_config_dict
+ from .qnnpack import get_qnnpack_backend_config
+ from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
+ from .executorch import get_executorch_backend_config
+ from .onednn import get_onednn_backend_config
+
+ __all__ = [
+     "get_fbgemm_backend_config",
+     "get_native_backend_config",
+     "get_native_backend_config_dict",
+     "get_qnnpack_backend_config",
+     "get_tensorrt_backend_config",
+     "get_tensorrt_backend_config_dict",
+     "get_executorch_backend_config",
+     "BackendConfig",
+     "BackendPatternConfig",
+     "DTypeConfig",
+     "DTypeWithConstraints",
+     "ObservationType",
+     "get_onednn_backend_config",
+ ]
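For orientation (a small sketch, not part of the diff above): the re-exported getters return BackendConfig objects whose configs, pattern and dtype_configs attributes are the same ones the new utils.py further down iterates over.

from torch.ao.quantization.backend_config import get_native_backend_config

backend_config = get_native_backend_config()
# Each BackendPatternConfig describes one quantizable or fusable pattern.
for pattern_config in backend_config.configs[:5]:
    print(pattern_config.pattern, len(pattern_config.dtype_configs))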
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py ADDED
@@ -0,0 +1,153 @@
+ import operator
+ import torch
+ from torch.ao.quantization.backend_config import (
+     BackendConfig,
+     DTypeConfig,
+     ObservationType,
+     BackendPatternConfig,
+ )
+
+ weighted_op_quint8_dtype_config = DTypeConfig(
+     input_dtype=torch.quint8,
+     output_dtype=torch.quint8,
+     weight_dtype=torch.qint8,
+     bias_dtype=torch.float,
+ )
+ from typing import List
+
+ def get_linear_configs():
+     linear_configs = []
+     observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+
+     # TODO: need to fix the way we insert observers for this pattern
+     # should be solved in the new fusion API
+     # reason that this doesn't work: the pattern is a bit complicated and we don't
+     # have a way to specify which input of the pattern we would like to observe
+     # pattern:
+     # bias input weight
+     #  \     |    /
+     #   \    |   t
+     #    \   |  /
+     #      addmm
+     # we want to observe "weight" as weight, but there is not way to convey this
+     # information with current pattern language
+     #
+     # right now:
+     # original:
+     #   weight - t \
+     #   input - addmm
+     # observed (no hack):
+     #   weight - t - observer \
+     #   input - observer - addmm
+     # target:
+     #   weight - observer - t \
+     #   input - observer - addmm
+
+     # def root_node_getter(node_pattern):
+     #     addmm, bias, act, weight = node_pattern
+     #     return addmm
+
+     # linear_configs.append(
+     #     BackendPatternConfig((torch.ops.aten.addmm.default, MatchAllNode, MatchAllNode, torch.ops.aten.t.default))
+     #     .set_observation_type(observation_type)  # noqa: E131
+     #     .set_dtype_configs(dtype_configs)
+     #     ._set_root_node_getter(root_node_getter))
+
+     linear_configs.append(
+         BackendPatternConfig(torch.ops.aten.addmm.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 2, "bias": 0})
+     )
+     return linear_configs
+
+ def get_conv_configs():
+     conv_configs = []
+     observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     conv_configs.append(
+         BackendPatternConfig(torch.ops.aten.convolution.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     conv_configs.append(
+         BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu.default))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     # TODO: remove when functionalization is supported in PT2 mode
+     conv_configs.append(
+         BackendPatternConfig((torch.ops.aten.convolution.default, torch.ops.aten.relu_.default))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_input_type_to_index({"weight": 1, "bias": 2})
+     )
+     return conv_configs
+
+ def get_pooling_configs():
+     backend_pattern_configs = []
+     observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+
+     def root_node_getter(node_pattern):
+         getitem, maxpool, index = node_pattern
+         return maxpool
+
+     backend_pattern_configs.append(
+         BackendPatternConfig()
+         ._set_pattern_complex_format((operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0))
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs)
+         ._set_root_node_getter(root_node_getter)
+     )
+
+     return backend_pattern_configs
+
+ def get_relu_configs():
+     backend_pattern_configs = []
+     observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     backend_pattern_configs.append(
+         BackendPatternConfig(torch.ops.aten.relu.default)
+         .set_observation_type(observation_type)  # noqa: E131
+         .set_dtype_configs(dtype_configs))
+     return backend_pattern_configs
+
+ def get_binary_op_configs():
+     binary_op_configs: List[BackendPatternConfig] = []
+     dtype_configs = [weighted_op_quint8_dtype_config]
+     num_tensor_args_to_observation_type_mapping = {
+         # TODO: this is not used right now since we have extra check in prepare
+         # will need to change this to NO_OBSERVER later after we implemented
+         # Tensor dtype inference properly
+         0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
+         1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
+         2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
+     }
+     for op_with_quantized_bop_scalar_variant in [torch.ops.aten.add.Tensor, torch.ops.aten.add_.Tensor]:
+         bop_patterns = [
+             (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu.default),
+             op_with_quantized_bop_scalar_variant,
+             # TODO: remove when functionalization is supported in pt2_mode
+             (op_with_quantized_bop_scalar_variant, torch.ops.aten.relu_.default),
+         ]
+         for bop_pattern in bop_patterns:
+             binary_op_configs.append(
+                 BackendPatternConfig(bop_pattern)
+                 .set_dtype_configs(dtype_configs)  # noqa: E131
+                 ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))
+
+     return binary_op_configs
+
+ def get_qnnpack_pt2e_backend_config():
+     return (
+         BackendConfig("qnnpack_pytorch_2.0_export")
+         .set_backend_pattern_configs(get_linear_configs())
+         .set_backend_pattern_configs(get_binary_op_configs())
+         .set_backend_pattern_configs(get_conv_configs())
+         .set_backend_pattern_configs(get_pooling_configs())
+         .set_backend_pattern_configs(get_relu_configs())
+     )
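As a quick sanity check (a sketch, not part of the diff): each get_*_configs() helper above returns a list of BackendPatternConfig objects keyed on aten ops, and get_qnnpack_pt2e_backend_config() merges them into one BackendConfig that can be inspected directly.

from torch.ao.quantization.backend_config._qnnpack_pt2e import get_qnnpack_pt2e_backend_config

backend_config = get_qnnpack_pt2e_backend_config()
print(backend_config.name)  # qnnpack_pytorch_2.0_export
# List the registered aten-op patterns and how their outputs are observed.
for cfg in backend_config.configs:
    print(cfg.pattern, cfg.observation_type)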
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py ADDED
@@ -0,0 +1,81 @@
+ import torch
+ from .backend_config import (
+     BackendConfig,
+     BackendPatternConfig,
+     DTypeConfig,
+     ObservationType
+ )
+ from ._common_operator_config_utils import (
+     _get_binary_op_configs,
+     _get_linear_configs,
+     _get_conv_configs,
+     _get_share_qparams_op_configs,
+     _get_tensor_info_op_configs,
+ )
+
+ __all__ = [
+     "get_tensorrt_backend_config",
+     "get_tensorrt_backend_config_dict",
+ ]
+
+ def get_tensorrt_backend_config() -> BackendConfig:
+     """
+     Return the `BackendConfig` for the TensorRT backend.
+     NOTE: Current api will change in the future, it's just to unblock experimentation for
+     new backends, please don't use it right now.
+     TODO: add a README when it's more stable
+     """
+     # dtype configs
+     weighted_op_qint8_dtype_config = DTypeConfig(
+         input_dtype=torch.qint8,
+         output_dtype=torch.qint8,
+         weight_dtype=torch.qint8,
+         bias_dtype=torch.float,
+     )
+     non_weighted_op_qint8_dtype_config = DTypeConfig(
+         input_dtype=torch.qint8,
+         output_dtype=torch.qint8,
+     )
+
+     addmm_config = BackendPatternConfig(torch.addmm) \
+         .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
+         .add_dtype_config(weighted_op_qint8_dtype_config) \
+         ._set_input_type_to_index({
+             "bias": 0,
+             "input": 1,
+             "weight": 2,
+         })
+     cat_config = BackendPatternConfig(torch.cat) \
+         .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
+         .add_dtype_config(non_weighted_op_qint8_dtype_config)
+     conv_dtype_configs = [
+         weighted_op_qint8_dtype_config,
+     ]
+     linear_dtype_configs = [
+         weighted_op_qint8_dtype_config,
+     ]
+     binary_op_dtype_configs = [
+         weighted_op_qint8_dtype_config,
+     ]
+     share_qparams_op_dtype_configs = [
+         non_weighted_op_qint8_dtype_config,
+     ]
+     tensor_info_op_dtype_configs = [
+         non_weighted_op_qint8_dtype_config,
+     ]
+     # there might be things not supported in fx2trt, but it will error out
+     # during fx2trt conversion and can support them after that
+     return BackendConfig("tensorrt") \
+         .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
+         .set_backend_pattern_config(addmm_config) \
+         .set_backend_pattern_config(cat_config) \
+         .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
+         .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
+         .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
+         .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs))
+
+ def get_tensorrt_backend_config_dict():
+     """
+     Return the `BackendConfig` for the TensorRT backend in dictionary form.
+     """
+     return get_tensorrt_backend_config().to_dict()
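For context (a minimal sketch under assumptions, not part of the diff): a BackendConfig like this one is normally passed to the FX graph mode quantization entry points. The toy model and the symmetric qint8 qconfig below are illustrative assumptions; prepare_fx, convert_to_reference_fx and the observer classes are standard torch.ao.quantization APIs.

import torch
from torch.ao.quantization import QConfig, QConfigMapping, default_weight_observer
from torch.ao.quantization.observer import HistogramObserver
from torch.ao.quantization.quantize_fx import prepare_fx, convert_to_reference_fx
from torch.ao.quantization.backend_config import get_tensorrt_backend_config

# TensorRT-style symmetric qint8 activations (assumed qconfig choice).
qconfig = QConfig(
    activation=HistogramObserver.with_args(qscheme=torch.per_tensor_symmetric, dtype=torch.qint8),
    weight=default_weight_observer,
)
qconfig_mapping = QConfigMapping().set_global(qconfig)

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 3, 32, 32),)
backend_config = get_tensorrt_backend_config()

prepared = prepare_fx(model, qconfig_mapping, example_inputs, backend_config=backend_config)
prepared(*example_inputs)  # run calibration data through the observers
reference = convert_to_reference_fx(prepared, backend_config=backend_config)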
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py ADDED
@@ -0,0 +1,279 @@
+ from typing import Dict, Any, List, Callable, Union, Tuple, Type
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .backend_config import (
+     BackendConfig,
+     BackendPatternConfig,
+     DTypeConfig,
+ )
+ from ..utils import Pattern
+ from ..fuser_method_mappings import (
+     _reverse2,
+     _reverse3,
+ )
+
+ __all__ = [
+     "get_pattern_to_dtype_configs",
+     "get_qat_module_classes",
+     "get_fused_module_classes",
+     "get_pattern_to_input_type_to_index",
+     "get_root_module_to_quantized_reference_module",
+     "get_fuser_method_mapping",
+     "get_module_to_qat_module",
+     "get_fusion_pattern_to_root_node_getter",
+     "get_fusion_pattern_to_extra_inputs_getter",
+     "remove_boolean_dispatch_from_name",
+     "pattern_to_human_readable",
+     "entry_to_pretty_str",
+ ]
+
+ def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]:
+     pattern_to_dtype_configs: Dict[Pattern, List[DTypeConfig]] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         pattern_to_dtype_configs[pattern] = config.dtype_configs
+     return pattern_to_dtype_configs
+
+ def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
+     qat_module_classes = []
+     for config in backend_config.configs:
+         if config.qat_module is not None:
+             qat_module_classes.append(config.qat_module)
+     return tuple(set(qat_module_classes))
+
+ def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
+     fused_module_classes = []
+     for config in backend_config.configs:
+         if config.fused_module is not None:
+             fused_module_classes.append(config.fused_module)
+     return tuple(set(fused_module_classes))
+
+ def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]:
+     pattern_to_input_type_to_index: Dict[Pattern, Dict[str, int]] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         pattern_to_input_type_to_index[pattern] = config._input_type_to_index
+     return pattern_to_input_type_to_index
+
+ def get_root_module_to_quantized_reference_module(
+         backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]:
+     mapping: Dict[Type[torch.nn.Module], Type[torch.nn.Module]] = {}
+     for config in backend_config.configs:
+         if config.root_module is not None and config.reference_quantized_module is not None:
+             mapping[config.root_module] = config.reference_quantized_module
+     return mapping
+
+ def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]:
+     fuser_method_mapping : Dict[Pattern, Union[nn.Sequential, Callable]] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         if config.fuser_method is not None:
+             # Note: both the fuser method and the pattern are specified in forward order in the
+             # BackendConfig, but the internal pattern matching code uses the reversed nested tuple
+             # format, so we need to convert both to the internal format
+             fuser_method = _get_fuser_method_in_reversed_nested_tuple_format(config)
+             fuser_method_mapping[pattern] = fuser_method
+     return fuser_method_mapping
+
+ def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]:
+     module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         if config.qat_module is not None:
+             module_to_qat_module[pattern] = config.qat_module
+     return module_to_qat_module
+
+ def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
+     """ Get a map from fusion pattern to a function that returns the root node
+     from the fusion pattern, e.g. the most common one is:
+     def get_root_node(node_pattern):
+         while not isinstance(node_pattern[-1], Node):
+             node_pattern = node_pattern[-1]
+         return node_pattern[-1]
+     This can work for all patterns whose root node is the "last node" in the pattern,
+     e.g. (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d))
+     """
+     root_node_getter_mapping: Dict[Pattern, Callable] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         if config._root_node_getter is not None:
+             root_node_getter_mapping[pattern] = config._root_node_getter
+     return root_node_getter_mapping
+
+ def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
+     """ Get a map from fusion pattern to a function that returns extra input nodes
+     from the fusion pattern, in the order required by the root node. This is optional,
+     if not specified, we will not copy over any extra inputs for the root node.
+     Example:
+     # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
+     # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra
+     # argument to the fused module, we can unpack the pattern and return the node at
+     # MatchAllNode here
+     # we can implement extra_inputs_getter as follows:
+     def extra_inputs_getter(pattern) -> List[Any]:
+         add, extra_input, conv_pattern = pattern
+         return [extra_input]
+     """
+     extra_inputs_getter_mapping: Dict[Pattern, Callable] = {}
+     for pattern, config in backend_config._pattern_complex_format_to_config.items():
+         if config._extra_inputs_getter is not None:
+             extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter
+     return extra_inputs_getter_mapping
+
+ def remove_boolean_dispatch_from_name(p) -> Any:
+     """
+     Some ops have a default string representation such as
+     '<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
+     this function replaces them with the hardcoded function names.
+     """
+     if p is F.fractional_max_pool2d:
+         return "torch.nn.functional.fractional_max_pool2d"
+     elif p is F.fractional_max_pool3d:
+         return "torch.nn.functional.fractional_max_pool3d"
+     elif p is F.max_pool1d:
+         return "torch.nn.functional.max_pool1d"
+     elif p is F.max_pool2d:
+         return "torch.nn.functional.max_pool2d"
+     elif p is F.max_pool3d:
+         return "torch.nn.functional.max_pool3d"
+     elif p is F.adaptive_max_pool1d:
+         return "torch.nn.functional.adaptive_max_pool1d"
+     elif p is F.adaptive_max_pool2d:
+         return "torch.nn.functional.adaptive_max_pool2d"
+     elif p is F.adaptive_max_pool3d:
+         return "torch.nn.functional.adaptive_max_pool3d"
+     assert "boolean_dispatch" not in str(p), \
+         f"{p} does not have a human readable representation in " + \
+         "quantization documentation"
+     return p
+
+ def pattern_to_human_readable(p) -> Any:
+     if isinstance(p, tuple):
+         # nested patterns, recurse
+         return tuple(pattern_to_human_readable(inner_p) for inner_p in p)
+     elif isinstance(p, str):
+         # method names are already human readable
+         return p
+     else:
+         p = remove_boolean_dispatch_from_name(p)
+         return p
+
+ # TODO(future PR): move backend_config_dict to use dataclass and move this logic to
+ # the corresponding __str__ function
+ def entry_to_pretty_str(entry) -> str:
+     """
+     Given a backend_config_dict entry, returns a string with the human readable
+     representation of it.
+     """
+     s = "{\n"
+
+     # always output the pattern first
+     if "pattern" in entry:
+         pattern_str = pattern_to_human_readable(entry["pattern"])
+
+         s += f"  'pattern': {pattern_str},\n"
+
+     # custom output for dtype_configs to make it look nice
+     if "dtype_configs" in entry:
+         s += "  'dtype_configs': [\n"
+         for dtype_config in entry["dtype_configs"]:
+             s += "    {\n"
+             for k, v in dtype_config.items():
+                 s += f"      '{k}': {v},\n"
+             s += "    },\n"
+         s += "  ],\n"
+
+     # custom output for num_tensor_args_to_observation_type to make it look nice
+     if "num_tensor_args_to_observation_type" in entry:
+         s += "  'num_tensor_args_to_observation_type': {\n"
+         for k, v in entry["num_tensor_args_to_observation_type"].items():
+             s += f"    {k}: {v},\n"
+         s += "  },\n"
+
+     # output all the other fields
+     custom_handled_fields = [
+         "pattern",
+         "dtype_configs",
+         "num_tensor_args_to_observation_type",
+     ]
+     for field_name in entry:
+         if field_name in custom_handled_fields:
+             continue
+         s += f"  '{field_name}': {entry[field_name]},\n"
+
+     s += "}"
+     return s
+
+ def _get_pattern_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Pattern:
+     """
+     Return the pattern specified in the given config in the reversed nested tuple format
+     used internally in the quantization pattern matching code.
+
+     If the pattern is not a tuple, or the pattern is already specified in the reversed
+     nested tuple format, return the pattern as is. Otherwise:
+
+     For 2-tuples (a, b), return (b, a).
+     For 3-tuples (a, b, c), return (c, (b, a)).
+
+     For example:
+         * Given nn.Linear, return nn.Linear
+         * Given (nn.Linear, nn.ReLU), return (nn.ReLU, nn.Linear)
+         * Given (nn.Conv2d, nn.BatchNorm2d, nn.ReLU), return
+           (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
+
+     For context, the reason why this is needed is the user-facing BackendConfig
+     API accepts the flat 2-or-3-tuple format in forward order. While this simple
+     format handles the vast majority of use cases, it does not handle the more
+     complex ones, and so the internal pattern matching code for quantization uses
+     the following, more general reversed nested tuple format instead:
+
+         operator = module_type | functional | torch op | native op | MatchAllNode
+         Pattern = (operator, Pattern, Pattern, ...) | operator
+
+     In the future, we expect to replace the above complex format with the one used
+     by the subgraph rewriter in torch.fx, so we don't have to maintain our own
+     complex pattern matching code. Then we won't need this helper function anymore.
+     """
+     if config._pattern_complex_format is not None:
+         return config._pattern_complex_format
+     if config.pattern is None:
+         raise ValueError("Either 'pattern' or 'pattern_complex_format' must be specified")
+     if not isinstance(config.pattern, tuple):
+         return config.pattern
+
+     # Pattern is specified in the simple tuple format, need to convert
+     if len(config.pattern) == 2:
+         (a, b) = config.pattern
+         return (b, a)
+     elif len(config.pattern) == 3:
+         (a, b, c) = config.pattern
+         return (c, (b, a))
+     else:
+         raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
+
+ def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:
+     """
+     Return the fuser method specified in the given config in the reversed nested
+     tuple format used internally in the quantization pattern matching code.
+
+     If pattern is specified in the reversed nested tuple format, we assume the
+     fuser method is also specified in this format and simply return it as is.
+     Otherwise, we convert the fuser method as follows:
+
+         * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv)
+         * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv),
+           where bn_conv is a 2-tuple (bn, conv)
+
+     The first argument of a fuser method is always `is_qat` and is not affected
+     in the conversion. We currently only support functions with 3 or 4 arguments.
+     """
+     assert config.fuser_method is not None
+     if config._pattern_complex_format is not None:
+         return config.fuser_method
+     if not isinstance(config.pattern, tuple):
+         raise ValueError("Expected pattern to be a tuple, got: ", config.pattern)
+
+     # Pattern is specified in the simple tuple format, need to convert
+     if len(config.pattern) == 2:
+         return _reverse2(config.fuser_method)
+     elif len(config.pattern) == 3:
+         return _reverse3(config.fuser_method)
+     else:
+         raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
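A short usage sketch (not part of the diff): the helpers above flatten a BackendConfig into the lookup tables that the FX prepare/convert passes consume; get_native_backend_config is used here only as a convenient source of patterns.

import torch
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.backend_config.utils import (
    get_pattern_to_dtype_configs,
    get_module_to_qat_module,
    get_fused_module_classes,
)

backend_config = get_native_backend_config()
pattern_to_dtypes = get_pattern_to_dtype_configs(backend_config)  # pattern -> [DTypeConfig]
module_to_qat = get_module_to_qat_module(backend_config)          # pattern -> QAT module class
print(len(pattern_to_dtypes), "patterns with dtype configs")
print(module_to_qat.get(torch.nn.Linear))
print(get_fused_module_classes(backend_config)[:3])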
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (269 Bytes).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (25.2 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc ADDED
Binary file (26 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc ADDED
Binary file (24.1 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc ADDED
Binary file (4.03 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc ADDED
Binary file (4.27 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc ADDED
Binary file (5.39 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc ADDED
Binary file (746 Bytes).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc ADDED
Binary file (3 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc ADDED
Binary file (33 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc ADDED
Binary file (8.7 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc ADDED
Binary file (7.02 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc ADDED
Binary file (1.81 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (27 kB).
 
openflamingo/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc ADDED
Binary file (23.2 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.82 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/aqlm.cpython-310.pyc ADDED
Binary file (2.8 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/awq.cpython-310.pyc ADDED
Binary file (13.1 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitnet.cpython-310.pyc ADDED
Binary file (8.99 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/bitsandbytes.cpython-310.pyc ADDED
Binary file (16.7 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/deepspeed.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/eetq.cpython-310.pyc ADDED
Binary file (3.66 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/executorch.cpython-310.pyc ADDED
Binary file (7.47 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/fbgemm_fp8.cpython-310.pyc ADDED
Binary file (4.72 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/flash_attention.cpython-310.pyc ADDED
Binary file (1.63 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/flex_attention.cpython-310.pyc ADDED
Binary file (1.31 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/fsdp.cpython-310.pyc ADDED
Binary file (708 Bytes).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/ggml.cpython-310.pyc ADDED
Binary file (16.6 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/higgs.cpython-310.pyc ADDED
Binary file (19.2 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/peft.cpython-310.pyc ADDED
Binary file (20.3 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/quanto.cpython-310.pyc ADDED
Binary file (2.93 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/sdpa_attention.cpython-310.pyc ADDED
Binary file (1.7 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/tiktoken.cpython-310.pyc ADDED
Binary file (1.72 kB).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (850 Bytes).
 
phi4/lib/python3.10/site-packages/transformers/integrations/__pycache__/vptq.cpython-310.pyc ADDED
Binary file (2.72 kB).
 
phi4/lib/python3.10/site-packages/transformers/quantizers/__init__.py ADDED
@@ -0,0 +1,15 @@
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from .auto import AutoHfQuantizer, AutoQuantizationConfig
+ from .base import HfQuantizer
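For reference (a hedged sketch, not part of the diff): AutoQuantizationConfig resolves a raw quantization config dict to the concrete config class, and AutoHfQuantizer builds the matching HfQuantizer that from_pretrained drives internally; the 4-bit bitsandbytes dict below is only an illustrative assumption.

from transformers.quantizers import AutoHfQuantizer, AutoQuantizationConfig

# Illustrative config dict; load_in_4bit routes it to the bitsandbytes 4-bit config.
quantization_config = AutoQuantizationConfig.from_dict({"load_in_4bit": True})
quantizer = AutoHfQuantizer.from_config(quantization_config)
print(type(quantizer).__name__, quantizer.quantization_config)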