Ryouko committed on
Commit
e723213
·
verified ·
1 Parent(s): 103b95b

Delete infer/modules/ipex

Browse files
infer/modules/ipex/__init__.py DELETED
@@ -1,165 +0,0 @@
1
- import os
2
- import sys
3
- import contextlib
4
- import torch
5
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
6
- from .hijacks import ipex_hijacks
7
- from .attention import attention_init
8
-
9
- # pylint: disable=protected-access, missing-function-docstring, line-too-long
10
-
11
def ipex_init():  # pylint: disable=too-many-statements
    """Monkey-patch ``torch.cuda`` so CUDA-targeting code runs on Intel XPU.

    Rebinds the public (and several private) attributes of ``torch.cuda`` to
    their ``torch.xpu`` equivalents, mirrors the memory/RNG/AMP helper APIs,
    fakes a CUDA 11.7 identity for capability checks, and finally installs
    the IPEX hijacks and sliced-attention overrides.

    Returns:
        tuple: ``(True, None)`` on success, ``(False, exception)`` if any
        patch raised.
    """
    try:
        # Replace cuda with xpu:
        torch.cuda.current_device = torch.xpu.current_device
        torch.cuda.current_stream = torch.xpu.current_stream
        torch.cuda.device = torch.xpu.device
        torch.cuda.device_count = torch.xpu.device_count
        torch.cuda.device_of = torch.xpu.device_of
        torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
        torch.cuda.get_device_name = torch.xpu.get_device_name
        torch.cuda.get_device_properties = torch.xpu.get_device_properties
        torch.cuda.init = torch.xpu.init
        torch.cuda.is_available = torch.xpu.is_available
        torch.cuda.is_initialized = torch.xpu.is_initialized
        # XPU has no CUDA-graph capture, so always report "not capturing".
        torch.cuda.is_current_stream_capturing = lambda: False
        torch.cuda.set_device = torch.xpu.set_device
        torch.cuda.stream = torch.xpu.stream
        torch.cuda.synchronize = torch.xpu.synchronize
        torch.cuda.Event = torch.xpu.Event
        torch.cuda.Stream = torch.xpu.Stream
        torch.cuda.FloatTensor = torch.xpu.FloatTensor
        torch.Tensor.cuda = torch.Tensor.xpu
        torch.Tensor.is_cuda = torch.Tensor.is_xpu
        # Private lazy-init state must be mirrored too, or torch.cuda's own
        # bookkeeping diverges from torch.xpu's.
        torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
        torch.cuda._initialized = torch.xpu.lazy_init._initialized
        torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker
        torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls
        torch.cuda._tls = torch.xpu.lazy_init._tls
        torch.cuda.threading = torch.xpu.lazy_init.threading
        torch.cuda.traceback = torch.xpu.lazy_init.traceback
        torch.cuda.Optional = torch.xpu.Optional
        torch.cuda.__cached__ = torch.xpu.__cached__
        torch.cuda.__loader__ = torch.xpu.__loader__
        torch.cuda.ComplexFloatStorage = torch.xpu.ComplexFloatStorage
        torch.cuda.Tuple = torch.xpu.Tuple
        torch.cuda.streams = torch.xpu.streams
        torch.cuda._lazy_new = torch.xpu._lazy_new
        torch.cuda.FloatStorage = torch.xpu.FloatStorage
        torch.cuda.Any = torch.xpu.Any
        torch.cuda.__doc__ = torch.xpu.__doc__
        torch.cuda.default_generators = torch.xpu.default_generators
        torch.cuda.HalfTensor = torch.xpu.HalfTensor
        torch.cuda._get_device_index = torch.xpu._get_device_index
        torch.cuda.__path__ = torch.xpu.__path__
        torch.cuda.Device = torch.xpu.Device
        torch.cuda.IntTensor = torch.xpu.IntTensor
        torch.cuda.ByteStorage = torch.xpu.ByteStorage
        torch.cuda.set_stream = torch.xpu.set_stream
        torch.cuda.BoolStorage = torch.xpu.BoolStorage
        torch.cuda.os = torch.xpu.os
        torch.cuda.torch = torch.xpu.torch
        torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage
        torch.cuda.Union = torch.xpu.Union
        torch.cuda.DoubleTensor = torch.xpu.DoubleTensor
        torch.cuda.ShortTensor = torch.xpu.ShortTensor
        torch.cuda.LongTensor = torch.xpu.LongTensor
        torch.cuda.IntStorage = torch.xpu.IntStorage
        torch.cuda.LongStorage = torch.xpu.LongStorage
        torch.cuda.__annotations__ = torch.xpu.__annotations__
        torch.cuda.__package__ = torch.xpu.__package__
        torch.cuda.__builtins__ = torch.xpu.__builtins__
        torch.cuda.CharTensor = torch.xpu.CharTensor
        torch.cuda.List = torch.xpu.List
        torch.cuda._lazy_init = torch.xpu._lazy_init
        torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor
        torch.cuda.DoubleStorage = torch.xpu.DoubleStorage
        torch.cuda.ByteTensor = torch.xpu.ByteTensor
        torch.cuda.StreamContext = torch.xpu.StreamContext
        torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage
        torch.cuda.ShortStorage = torch.xpu.ShortStorage
        torch.cuda._lazy_call = torch.xpu._lazy_call
        torch.cuda.HalfStorage = torch.xpu.HalfStorage
        torch.cuda.random = torch.xpu.random
        torch.cuda._device = torch.xpu._device
        torch.cuda.classproperty = torch.xpu.classproperty
        torch.cuda.__name__ = torch.xpu.__name__
        torch.cuda._device_t = torch.xpu._device_t
        torch.cuda.warnings = torch.xpu.warnings
        torch.cuda.__spec__ = torch.xpu.__spec__
        torch.cuda.BoolTensor = torch.xpu.BoolTensor
        torch.cuda.CharStorage = torch.xpu.CharStorage
        torch.cuda.__file__ = torch.xpu.__file__
        torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
        #torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing

        #Memory:
        torch.cuda.memory = torch.xpu.memory
        # NOTE(review): empty_cache is disabled under WSL2 (detected via
        # `uname -a`) — presumably it misbehaves there; confirm the reason.
        if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
            torch.xpu.empty_cache = lambda: None
        torch.cuda.empty_cache = torch.xpu.empty_cache
        torch.cuda.memory_stats = torch.xpu.memory_stats
        torch.cuda.memory_summary = torch.xpu.memory_summary
        torch.cuda.memory_snapshot = torch.xpu.memory_snapshot
        torch.cuda.memory_allocated = torch.xpu.memory_allocated
        torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated
        torch.cuda.memory_reserved = torch.xpu.memory_reserved
        # XPU has no separate "cached" counters; map them onto reserved/peak.
        torch.cuda.memory_cached = torch.xpu.memory_reserved
        torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved
        torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved
        torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats
        torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats
        torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats
        torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict
        torch.cuda.reset_accumulated_memory_stats = torch.xpu.reset_accumulated_memory_stats

        #RNG:
        torch.cuda.get_rng_state = torch.xpu.get_rng_state
        torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all
        torch.cuda.set_rng_state = torch.xpu.set_rng_state
        torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all
        torch.cuda.manual_seed = torch.xpu.manual_seed
        torch.cuda.manual_seed_all = torch.xpu.manual_seed_all
        torch.cuda.seed = torch.xpu.seed
        torch.cuda.seed_all = torch.xpu.seed_all
        torch.cuda.initial_seed = torch.xpu.initial_seed

        #AMP:
        torch.cuda.amp = torch.xpu.amp
        if not hasattr(torch.cuda.amp, "common"):
            torch.cuda.amp.common = contextlib.nullcontext()
        torch.cuda.amp.common.amp_definitely_not_available = lambda: False
        # Prefer the native XPU GradScaler; fall back to the local
        # gradscaler shim, then to IPEX's CPU implementation.
        try:
            torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
        except Exception: # pylint: disable=broad-exception-caught
            try:
                from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error
                gradscaler_init()
                torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
            except Exception: # pylint: disable=broad-exception-caught
                torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler

        #C
        torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
        # Fake device-properties version numbers so downstream capability
        # probes succeed.
        ipex._C._DeviceProperties.major = 2023
        ipex._C._DeviceProperties.minor = 2

        #Fix functions with ipex:
        # Mimic cuda's mem_get_info [free, total] pair from XPU counters.
        torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory]
        torch._utils._get_available_device_type = lambda: "xpu"
        torch.has_cuda = True
        torch.cuda.has_half = True
        torch.cuda.is_bf16_supported = lambda *args, **kwargs: True
        torch.cuda.is_fp16_supported = lambda *args, **kwargs: True
        # Impersonate CUDA 11.7 so version/capability gates pass.
        torch.version.cuda = "11.7"
        torch.cuda.get_device_capability = lambda *args, **kwargs: [11,7]
        torch.cuda.get_device_properties.major = 11
        torch.cuda.get_device_properties.minor = 7
        torch.cuda.ipc_collect = lambda *args, **kwargs: None
        torch.cuda.utilization = lambda *args, **kwargs: 0

        ipex_hijacks()
        attention_init()
    except Exception as e:
        return False, e
    return True, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
infer/modules/ipex/attention.py DELETED
@@ -1,128 +0,0 @@
1
- import torch
2
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
3
-
4
- # pylint: disable=protected-access, missing-function-docstring, line-too-long
5
-
6
original_torch_bmm = torch.bmm
def torch_bmm(input, mat2, *, out=None):
    """Drop-in replacement for ``torch.bmm`` that slices oversized batches.

    ARC GPUs can't allocate more than 4GB to a single block, so when the
    estimated result block exceeds ~4GB the batch dimension (and, if still
    too large, the token dimension) is processed in progressively halved
    slices.

    Fix over the previous version: the slice loops use ceiling division with
    a clamped end index, so when a dimension is not divisible by the slice
    size the trailing remainder is computed instead of being left as zeros.

    Args:
        input: batched matrix of shape (B, M, K).
        mat2: batched matrix of shape (B, K, N); cast to input's dtype if needed.
        out: forwarded to torch.bmm (normally None).

    Returns:
        The (B, M, N) batched matrix product, identical to ``torch.bmm``.
    """
    if input.dtype != mat2.dtype:
        mat2 = mat2.to(input.dtype)

    #ARC GPUs can't allocate more than 4GB to a single block, Slice it:
    batch_size_attention, input_tokens, mat2_shape = input.shape[0], input.shape[1], mat2.shape[2]
    # Per-element size heuristic (fp32 vs smaller dtypes) — TODO confirm units.
    block_multiply = 2.4 if input.dtype == torch.float32 else 1.2
    block_size = (batch_size_attention * input_tokens * mat2_shape) / 1024 * block_multiply #MB
    split_slice_size = batch_size_attention
    if block_size >= 4000:
        do_split = True
        # Halve the batch slice until the estimated block fits the limit.
        while ((split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply) > 4000:
            split_slice_size = split_slice_size // 2
            if split_slice_size <= 1:
                split_slice_size = 1
                break
    else:
        do_split = False

    split_block_size = (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply #MB
    split_2_slice_size = input_tokens
    if split_block_size >= 4000:
        do_split_2 = True
        # Batch slicing alone was not enough; also halve the token slice.
        while ((split_slice_size * split_2_slice_size * mat2_shape) / 1024 * block_multiply) > 4000:
            split_2_slice_size = split_2_slice_size // 2
            if split_2_slice_size <= 1:
                split_2_slice_size = 1
                break
    else:
        do_split_2 = False

    if do_split:
        hidden_states = torch.zeros(input.shape[0], input.shape[1], mat2.shape[2], device=input.device, dtype=input.dtype)
        # Ceiling division so a non-divisible batch still covers its tail.
        for i in range((batch_size_attention + split_slice_size - 1) // split_slice_size):
            start_idx = i * split_slice_size
            end_idx = min(start_idx + split_slice_size, batch_size_attention)
            if do_split_2:
                for i2 in range((input_tokens + split_2_slice_size - 1) // split_2_slice_size): # pylint: disable=invalid-name
                    start_idx_2 = i2 * split_2_slice_size
                    end_idx_2 = min(start_idx_2 + split_2_slice_size, input_tokens)
                    # NOTE(review): mat2 is sliced on its dim 1 with token
                    # indices, as in the original — verify this path is only
                    # hit with compatible shapes.
                    hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = original_torch_bmm(
                        input[start_idx:end_idx, start_idx_2:end_idx_2],
                        mat2[start_idx:end_idx, start_idx_2:end_idx_2],
                        out=out
                    )
            else:
                hidden_states[start_idx:end_idx] = original_torch_bmm(
                    input[start_idx:end_idx],
                    mat2[start_idx:end_idx],
                    out=out
                )
        return hidden_states
    return original_torch_bmm(input, mat2, out=out)
63
-
64
original_scaled_dot_product_attention = torch.nn.functional.scaled_dot_product_attention
def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False):
    """Sliced drop-in for ``F.scaled_dot_product_attention``.

    ARC GPUs can't allocate more than 4GB to a single block, so when the
    estimated attention block exceeds ~4GB the head dimension (dim 1) and,
    if needed, the query-token dimension (dim 2) are processed in halved
    slices.

    Fix over the previous version: the slice loops use ceiling division with
    a clamped end index, so when a dimension is not divisible by the slice
    size the trailing remainder is computed instead of being left as zeros.

    Args/returns match torch.nn.functional.scaled_dot_product_attention
    (4-D query/key/value assumed, as the shape unpack below requires).
    """
    #ARC GPUs can't allocate more than 4GB to a single block, Slice it:
    shape_one, batch_size_attention, query_tokens, shape_four = query.shape
    block_multiply = 2.4 if query.dtype == torch.float32 else 1.2
    block_size = (shape_one * batch_size_attention * query_tokens * shape_four) / 1024 * block_multiply #MB
    split_slice_size = batch_size_attention
    if block_size >= 4000:
        do_split = True
        # Halve the dim-1 slice until the estimated block fits the limit.
        while ((shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply) > 4000:
            split_slice_size = split_slice_size // 2
            if split_slice_size <= 1:
                split_slice_size = 1
                break
    else:
        do_split = False

    split_block_size = (shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply #MB
    split_2_slice_size = query_tokens
    if split_block_size >= 4000:
        do_split_2 = True
        # Dim-1 slicing alone was not enough; also halve the query-token slice.
        while ((shape_one * split_slice_size * split_2_slice_size * shape_four) / 1024 * block_multiply) > 4000:
            split_2_slice_size = split_2_slice_size // 2
            if split_2_slice_size <= 1:
                split_2_slice_size = 1
                break
    else:
        do_split_2 = False

    if do_split:
        hidden_states = torch.zeros(query.shape, device=query.device, dtype=query.dtype)
        # Ceiling division so a non-divisible dimension still covers its tail.
        for i in range((batch_size_attention + split_slice_size - 1) // split_slice_size):
            start_idx = i * split_slice_size
            end_idx = min(start_idx + split_slice_size, batch_size_attention)
            if do_split_2:
                for i2 in range((query_tokens + split_2_slice_size - 1) // split_2_slice_size): # pylint: disable=invalid-name
                    start_idx_2 = i2 * split_2_slice_size
                    end_idx_2 = min(start_idx_2 + split_2_slice_size, query_tokens)
                    hidden_states[:, start_idx:end_idx, start_idx_2:end_idx_2] = original_scaled_dot_product_attention(
                        query[:, start_idx:end_idx, start_idx_2:end_idx_2],
                        key[:, start_idx:end_idx, start_idx_2:end_idx_2],
                        value[:, start_idx:end_idx, start_idx_2:end_idx_2],
                        attn_mask=attn_mask[:, start_idx:end_idx, start_idx_2:end_idx_2] if attn_mask is not None else attn_mask,
                        dropout_p=dropout_p, is_causal=is_causal
                    )
            else:
                hidden_states[:, start_idx:end_idx] = original_scaled_dot_product_attention(
                    query[:, start_idx:end_idx],
                    key[:, start_idx:end_idx],
                    value[:, start_idx:end_idx],
                    attn_mask=attn_mask[:, start_idx:end_idx] if attn_mask is not None else attn_mask,
                    dropout_p=dropout_p, is_causal=is_causal
                )
        return hidden_states
    return original_scaled_dot_product_attention(
        query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal
    )
124
-
125
def attention_init():
    """Monkey-patch torch with the sliced attention kernels defined above."""
    #ARC GPUs can't allocate more than 4GB to a single block:
    setattr(torch, "bmm", torch_bmm)
    setattr(
        torch.nn.functional,
        "scaled_dot_product_attention",
        scaled_dot_product_attention,
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
infer/modules/ipex/gradscaler.py DELETED
@@ -1,179 +0,0 @@
1
- from collections import defaultdict
2
- import torch
3
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
4
- import intel_extension_for_pytorch._C as core # pylint: disable=import-error, unused-import
5
-
6
- # pylint: disable=protected-access, missing-function-docstring, line-too-long
7
-
8
# Re-export the pieces of IPEX's CPU GradScaler implementation that the
# patched methods below rely on.
OptState = ipex.cpu.autocast._grad_scaler.OptState
_MultiDeviceReplicator = ipex.cpu.autocast._grad_scaler._MultiDeviceReplicator
_refresh_per_optimizer_state = ipex.cpu.autocast._grad_scaler._refresh_per_optimizer_state
11
-
12
def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16): # pylint: disable=unused-argument
    """Unscale *optimizer*'s gradients in place and record inf/NaN hits.

    Gradients are gathered once, moved to CPU, grouped by (device, dtype),
    and checked/unscaled with IPEX's foreach kernel.  Returns the
    per-device found_inf tensors collected by the replicator.
    """
    per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
    per_device_found_inf = _MultiDeviceReplicator(found_inf)

    # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
    # There could be hundreds of grads, so we'd like to iterate through them just once.
    # However, we don't know their devices or dtypes in advance.

    # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
    # Google says mypy struggles with defaultdicts type annotations.
    per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
    # sync grad to master weight
    if hasattr(optimizer, "sync_grad"):
        optimizer.sync_grad()
    with torch.no_grad():
        for group in optimizer.param_groups:
            for param in group["params"]:
                if param.grad is None:
                    continue
                if (not allow_fp16) and param.grad.dtype == torch.float16:
                    raise ValueError("Attempting to unscale FP16 gradients.")
                if param.grad.is_sparse:
                    # is_coalesced() == False means the sparse grad has values with duplicate indices.
                    # coalesce() deduplicates indices and adds all values that have the same index.
                    # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                    # so we should check the coalesced _values().
                    if param.grad.dtype is torch.float16:
                        param.grad = param.grad.coalesce()
                    to_unscale = param.grad._values()
                else:
                    to_unscale = param.grad

                # -: is there a way to split by device and dtype without appending in the inner loop?
                # All grads are checked on CPU, hence the "cpu" keys below.
                to_unscale = to_unscale.to("cpu")
                per_device_and_dtype_grads[to_unscale.device][
                    to_unscale.dtype
                ].append(to_unscale)

    for _, per_dtype_grads in per_device_and_dtype_grads.items():
        for grads in per_dtype_grads.values():
            core._amp_foreach_non_finite_check_and_unscale_(
                grads,
                per_device_found_inf.get("cpu"),
                per_device_inv_scale.get("cpu"),
            )

    return per_device_found_inf._per_device_tensors
59
-
60
def unscale_(self, optimizer):
    """
    Divides ("unscales") the optimizer's gradient tensors by the scale factor.
    :meth:`unscale_` is optional, serving cases where you need to
    :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
    between the backward pass(es) and :meth:`step`.
    If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
    Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
        ...
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        scaler.step(optimizer)
        scaler.update()
    Args:
        optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
    .. warning::
        :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
        and only after all gradients for that optimizer's assigned parameters have been accumulated.
        Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
    .. warning::
        :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
    """
    if not self._enabled:
        return

    self._check_scale_growth_tracker("unscale_")

    optimizer_state = self._per_optimizer_states[id(optimizer)]

    # Enforce the scale/unscale/step/update state machine.
    if optimizer_state["stage"] is OptState.UNSCALED: # pylint: disable=no-else-raise
        raise RuntimeError(
            "unscale_() has already been called on this optimizer since the last update()."
        )
    elif optimizer_state["stage"] is OptState.STEPPED:
        raise RuntimeError("unscale_() is being called after step().")

    # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
    assert self._scale is not None
    inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
    found_inf = torch.full(
        (1,), 0.0, dtype=torch.float32, device=self._scale.device
    )

    optimizer_state["found_inf_per_device"] = self._unscale_grads_(
        optimizer, inv_scale, found_inf, False
    )
    optimizer_state["stage"] = OptState.UNSCALED
108
-
109
def update(self, new_scale=None):
    """
    Updates the scale factor.
    If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
    to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
    the scale is multiplied by ``growth_factor`` to increase it.
    Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
    used directly, it's used to fill GradScaler's internal scale tensor. So if
    ``new_scale`` was a tensor, later in-place changes to that tensor will not further
    affect the scale GradScaler uses internally.)
    Args:
        new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor.
    .. warning::
        :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
        been invoked for all optimizers used this iteration.
    """
    if not self._enabled:
        return

    _scale, _growth_tracker = self._check_scale_growth_tracker("update")

    if new_scale is not None:
        # Accept a new user-defined scale.
        if isinstance(new_scale, float):
            self._scale.fill_(new_scale) # type: ignore[union-attr]
        else:
            reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False."
            assert isinstance(new_scale, torch.FloatTensor), reason # type: ignore[attr-defined]
            assert new_scale.numel() == 1, reason
            assert new_scale.requires_grad is False, reason
            self._scale.copy_(new_scale) # type: ignore[union-attr]
    else:
        # Consume shared inf/nan data collected from optimizers to update the scale.
        # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
        found_infs = [
            found_inf.to(device="cpu", non_blocking=True)
            for state in self._per_optimizer_states.values()
            for found_inf in state["found_inf_per_device"].values()
        ]

        assert len(found_infs) > 0, "No inf checks were recorded prior to update."

        found_inf_combined = found_infs[0]
        if len(found_infs) > 1:
            for i in range(1, len(found_infs)):
                found_inf_combined += found_infs[i]

        # The IPEX scale-update kernel runs on CPU; move state there and back.
        to_device = _scale.device
        _scale = _scale.to("cpu")
        _growth_tracker = _growth_tracker.to("cpu")

        core._amp_update_scale_(
            _scale,
            _growth_tracker,
            found_inf_combined,
            self._growth_factor,
            self._backoff_factor,
            self._growth_interval,
        )

        # NOTE(review): the updated tensors are only rebound to the local
        # names here, not written back to self._scale/self._growth_tracker —
        # when _scale lives off-CPU, .to("cpu") copies, so verify the
        # in-place update actually propagates to the scaler's state.
        _scale = _scale.to(to_device)
        _growth_tracker = _growth_tracker.to(to_device)
    # To prepare for next iteration, clear the data collected from optimizers this iteration.
    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
173
-
174
def gradscaler_init():
    """Install IPEX's CPU GradScaler as ``torch.xpu.amp.GradScaler`` with the
    patched ``_unscale_grads_``/``unscale_``/``update`` methods defined above.

    Returns:
        The patched GradScaler class.
    """
    torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
    torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_
    torch.xpu.amp.GradScaler.unscale_ = unscale_
    torch.xpu.amp.GradScaler.update = update
    return torch.xpu.amp.GradScaler
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
infer/modules/ipex/hijacks.py DELETED
@@ -1,196 +0,0 @@
1
- import contextlib
2
- import importlib
3
- import torch
4
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
5
-
6
- # pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return
7
-
8
class CondFunc: # pylint: disable=missing-class-docstring
    """Conditionally route calls to a substitute implementation.

    ``CondFunc(orig, sub, cond)`` evaluates to a callable that invokes
    ``sub(orig, *args, **kwargs)`` whenever ``cond`` is falsy or
    ``cond(orig, *args, **kwargs)`` returns true, and otherwise falls
    through to ``orig(*args, **kwargs)``.  When ``orig`` is a dotted-path
    string, the named attribute is resolved and replaced in place with the
    wrapper.
    """
    def __new__(cls, orig_func, sub_func, cond_func):
        instance = super(CondFunc, cls).__new__(cls)
        if isinstance(orig_func, str):
            func_path = orig_func.split('.')
            # Import the longest importable module prefix of the dotted path.
            for depth in range(len(func_path) - 1, -1, -1):
                try:
                    resolved_obj = importlib.import_module('.'.join(func_path[:depth]))
                    break
                except ImportError:
                    pass
            # Walk the remaining attributes down to the owner object.
            for attr_name in func_path[depth:-1]:
                resolved_obj = getattr(resolved_obj, attr_name)
            orig_func = getattr(resolved_obj, func_path[-1])
            # Patch the attribute in place so existing callers hit the wrapper.
            setattr(resolved_obj, func_path[-1], lambda *a, **kw: instance(*a, **kw))
        instance.__init__(orig_func, sub_func, cond_func)
        return lambda *a, **kw: instance(*a, **kw)
    def __init__(self, orig_func, sub_func, cond_func):
        self.__orig_func = orig_func
        self.__sub_func = sub_func
        self.__cond_func = cond_func
    def __call__(self, *args, **kwargs):
        take_substitute = not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs)
        if take_substitute:
            return self.__sub_func(self.__orig_func, *args, **kwargs)
        return self.__orig_func(*args, **kwargs)
34
-
35
# Alias kept so the module mirrors torch's internal _utils namespace.
_utils = torch.utils.data._utils
def _shutdown_workers(self):
    """Replacement for ``_MultiProcessingDataLoaderIter._shutdown_workers``.

    Tears down pin-memory thread, worker processes, and their queues; bails
    out early if the interpreter is already exiting (python_exit_status).
    Installed by ipex_hijacks() because the stock version misbehaves with
    the XPU backend — see "Functions that make compile mad with CondFunc".
    """
    # During interpreter shutdown the _utils module (or its exit flag) may
    # already be torn down — in that case do nothing.
    if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None:
        return
    if hasattr(self, "_shutdown") and not self._shutdown:
        self._shutdown = True
        try:
            # Stop the pin-memory thread first so it stops draining the
            # worker result queue.
            if hasattr(self, '_pin_memory_thread'):
                self._pin_memory_thread_done_event.set()
                self._worker_result_queue.put((None, None))
                self._pin_memory_thread.join()
                self._worker_result_queue.cancel_join_thread()
                self._worker_result_queue.close()
            self._workers_done_event.set()
            for worker_id in range(len(self._workers)):
                if self._persistent_workers or self._workers_status[worker_id]:
                    self._mark_worker_as_unavailable(worker_id, shutdown=True)
            # Give each worker a bounded join, then close its index queue.
            for w in self._workers: # pylint: disable=invalid-name
                w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL)
            for q in self._index_queues: # pylint: disable=invalid-name
                q.cancel_join_thread()
                q.close()
        finally:
            # Always unregister worker PIDs and hard-kill stragglers.
            if self._worker_pids_set:
                torch.utils.data._utils.signal_handling._remove_worker_pids(id(self))
                self._worker_pids_set = False
            for w in self._workers: # pylint: disable=invalid-name
                if w.is_alive():
                    w.terminate()
64
-
65
class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods
    """Stand-in for ``torch.nn.DataParallel`` on IPEX.

    Instead of wrapping the module it simply moves it to the (single) XPU
    device and returns it, warning when multiple device ids are requested.
    """
    def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument
        wants_multiple = isinstance(device_ids, list) and len(device_ids) > 1
        if wants_multiple:
            print("IPEX backend doesn't support DataParallel on multiple XPU devices")
        return module.to("xpu")
70
-
71
def return_null_context(*args, **kwargs): # pylint: disable=unused-argument
    """Ignore every argument and hand back a do-nothing context manager."""
    null_ctx = contextlib.nullcontext()
    return null_ctx
73
-
74
def check_device(device):
    """Return True when *device* designates a CUDA target.

    Accepts a torch.device (type must be 'cuda'), a device string
    (containing 'cuda'), or a bare integer index.
    """
    if isinstance(device, torch.device):
        return device.type == "cuda"
    if isinstance(device, str):
        return "cuda" in device
    return isinstance(device, int)
76
-
77
def return_xpu(device):
    """Map any CUDA device spec onto the equivalent XPU spec.

    Accepts an index (int), a device string ('cuda' or 'cuda:N'), or a
    torch.device; returns the matching XPU string/device.

    Fix over the previous version: the device index is taken as everything
    after the colon (previously ``device[-1]``), so multi-digit indices like
    'cuda:12' map to 'xpu:12' instead of 'xpu:2'.
    """
    if isinstance(device, str) and ":" in device:
        return f"xpu:{device.split(':')[-1]}"
    if isinstance(device, int):
        return f"xpu:{device}"
    if isinstance(device, torch.device):
        return torch.device("xpu")
    return "xpu"
79
-
80
def ipex_no_cuda(orig_func, *args, **kwargs):
    """Run *orig_func* with ``torch.cuda.is_available`` forced to False.

    Some torch internals take broken code paths when they believe CUDA is
    present; this temporarily hides it and restores the XPU-backed probe
    afterwards.

    Fix over the previous version: restoration happens in a ``finally``
    block, so an exception inside *orig_func* can no longer leave
    ``torch.cuda.is_available`` permanently stuck at False.
    """
    torch.cuda.is_available = lambda: False
    try:
        orig_func(*args, **kwargs)
    finally:
        torch.cuda.is_available = torch.xpu.is_available
84
-
85
original_autocast = torch.autocast
def ipex_autocast(*args, **kwargs):
    """``torch.autocast`` wrapper that transparently retargets 'cuda' to 'xpu'."""
    if args and args[0] == "cuda":
        return original_autocast("xpu", *args[1:], **kwargs)
    return original_autocast(*args, **kwargs)
91
-
92
original_torch_cat = torch.cat
def torch_cat(tensor, *args, **kwargs):
    """``torch.cat`` wrapper for the common three-tensor case with mixed
    dtypes: the outer tensors are cast to the middle tensor's dtype first."""
    needs_cast = (
        len(tensor) == 3
        and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype)
    )
    if not needs_cast:
        return original_torch_cat(tensor, *args, **kwargs)
    anchor_dtype = tensor[1].dtype
    harmonized = [tensor[0].to(anchor_dtype), tensor[1], tensor[2].to(anchor_dtype)]
    return original_torch_cat(harmonized, *args, **kwargs)
98
-
99
original_interpolate = torch.nn.functional.interpolate
def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments
    """``F.interpolate`` wrapper: antialias / align_corners cases are run on
    CPU in float32 and the result is moved back to the original device and
    dtype; everything else passes straight through."""
    shared_kwargs = dict(
        size=size,
        scale_factor=scale_factor,
        mode=mode,
        align_corners=align_corners,
        recompute_scale_factor=recompute_scale_factor,
        antialias=antialias,
    )
    needs_cpu_fallback = antialias or align_corners is not None
    if not needs_cpu_fallback:
        return original_interpolate(tensor, **shared_kwargs)
    return_device = tensor.device
    return_dtype = tensor.dtype
    result = original_interpolate(tensor.to("cpu", dtype=torch.float32), **shared_kwargs)
    return result.to(return_device, dtype=return_dtype)
109
-
110
original_linalg_solve = torch.linalg.solve
def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name
    """``torch.linalg.solve`` wrapper: non-CPU inputs are solved on CPU and
    the solution is moved back to A's original device."""
    cpu = torch.device("cpu")
    if A.device == cpu and B.device == cpu:
        return original_linalg_solve(A, B, *args, **kwargs)
    return_device = A.device
    solution = original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs)
    return solution.to(return_device)
117
-
118
def ipex_hijacks():
    """Install every CondFunc hook and direct monkey-patch that redirects
    CUDA-targeting torch calls to XPU and works around IPEX limitations.

    Grouped below: device-redirecting factory functions, generator/norm
    shims, dtype-mismatch fixes, a float64 fallback for GPUs without fp64,
    and a handful of direct replacements that CondFunc cannot wrap.
    """
    # Redirect device arguments of tensor factories and movers to XPU.
    CondFunc('torch.Tensor.to',
        lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
        lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
    CondFunc('torch.Tensor.cuda',
        lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
        lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
    CondFunc('torch.empty',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))
    CondFunc('torch.load',
        lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs),
        lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location))
    CondFunc('torch.randn',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))
    CondFunc('torch.ones',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))
    CondFunc('torch.zeros',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))
    CondFunc('torch.tensor',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))
    CondFunc('torch.linspace',
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        lambda orig_func, *args, device=None, **kwargs: check_device(device))

    # Non-CPU generators become XPU generators.
    CondFunc('torch.Generator',
        lambda orig_func, device=None: torch.xpu.Generator(device),
        lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")

    # Supply default weight/bias tensors for norm ops on non-CPU inputs.
    CondFunc('torch.batch_norm',
        lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
            weight if weight is not None else torch.ones(input.size()[1], device=input.device),
            bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
        lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
    CondFunc('torch.instance_norm',
        lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
            weight if weight is not None else torch.ones(input.size()[1], device=input.device),
            bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
        lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))

    #Functions with dtype errors:
    # Cast inputs to the layer's weight dtype before the forward pass.
    CondFunc('torch.nn.modules.GroupNorm.forward',
        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
    CondFunc('torch.nn.modules.linear.Linear.forward',
        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
    CondFunc('torch.nn.modules.conv.Conv2d.forward',
        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
    CondFunc('torch.nn.functional.layer_norm',
        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
        orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
        lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
        weight is not None and input.dtype != weight.data.dtype)

    #Diffusers Float64 (ARC GPUs doesn't support double or Float64):
    if not torch.xpu.has_fp64_dtype():
        CondFunc('torch.from_numpy',
        lambda orig_func, ndarray: orig_func(ndarray.astype('float32')),
        lambda orig_func, ndarray: ndarray.dtype == float)

    #Broken functions when torch.cuda.is_available is True:
    # Hide CUDA while DataLoader iterators initialize.
    CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__',
        lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs),
        lambda orig_func, *args, **kwargs: True)

    #Functions that make compile mad with CondFunc:
    torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers
    torch.nn.DataParallel = DummyDataParallel
    torch.autocast = ipex_autocast
    torch.cat = torch_cat
    torch.linalg.solve = linalg_solve
    torch.nn.functional.interpolate = interpolate
    # XPU lacks the SDP-kernel selector; make it a no-op context manager.
    torch.backends.cuda.sdp_kernel = return_null_context
- torch.backends.cuda.sdp_kernel = return_null_context