naveensp committed (verified)
Commit 7496fe7 · 1 Parent(s): b79fb53

Upload torch_util.py with huggingface_hub

Files changed (1)
  1. torch_util.py +139 -0
torch_util.py ADDED
@@ -0,0 +1,139 @@
+ import gc
+ import os
+ from typing import Optional, TypeVar
+
+ import torch
+ import torch.distributed as dist
+
+ T = TypeVar("T")
+
+
+ def seed_all(seed: int):
+     """Seed all rng objects."""
+     import random
+
+     import numpy as np
+
+     if seed < 0 or seed > 2**32 - 1:
+         raise ValueError(f"Seed {seed} is invalid. It must be on [0; 2^32 - 1]")
+     random.seed(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     # torch.manual_seed may call manual_seed_all but calling it again here
+     # to make sure it gets called at least once
+     torch.cuda.manual_seed_all(seed)
+
+
+ def is_distributed() -> bool:
+     return dist.is_available() and dist.is_initialized()
+
+
+ def get_node_rank() -> int:
+     return int(os.environ.get("NODE_RANK") or (get_global_rank() - get_local_rank()) // get_local_world_size())
+
+
+ def get_world_size() -> int:
+     if is_distributed():
+         return dist.get_world_size()
+     else:
+         return 1
+
+
+ def get_local_world_size() -> int:
+     return int(os.environ.get("LOCAL_WORLD_SIZE") or 1)
+
+
+ def get_global_rank() -> int:
+     return int(os.environ.get("RANK") or dist.get_rank())
+
+
+ def get_local_rank() -> int:
+     return int(os.environ.get("LOCAL_RANK") or 0)
+
+
+ def get_fs_local_rank() -> int:
+     """Get the local rank per filesystem, meaning that, regardless of the number of nodes,
+     if all ranks share the same filesystem then `get_fs_local_rank()` will be equivalent to `get_global_rank()`,
+     but if nodes do not share the same filesystem then `get_fs_local_rank()` will be equivalent to `get_local_rank()`.
+     """
+     return int(os.environ.get("FS_LOCAL_RANK") or get_local_rank())
+
+
+ def move_to_device(o: T, device: torch.device) -> T:
+     if isinstance(o, torch.Tensor):
+         return o.to(device)  # type: ignore[return-value]
+     elif isinstance(o, dict):
+         return {k: move_to_device(v, device) for k, v in o.items()}  # type: ignore[return-value]
+     elif isinstance(o, list):
+         return [move_to_device(x, device) for x in o]  # type: ignore[return-value]
+     elif isinstance(o, tuple):
+         return tuple((move_to_device(x, device) for x in o))  # type: ignore[return-value]
+     else:
+         return o
+
+
+ def ensure_finite_(x: torch.Tensor, check_neg_inf: bool = True, check_pos_inf: bool = False):
+     """
+     Modify ``x`` in place to replace ``float("-inf")`` with the minimum value of the dtype when ``check_neg_inf``
+     is ``True`` and to replace ``float("inf")`` with the maximum value of the dtype when ``check_pos_inf`` is ``True``.
+     """
+     if check_neg_inf:
+         x.masked_fill_(x == float("-inf"), torch.finfo(x.dtype).min)
+     if check_pos_inf:
+         x.masked_fill_(x == float("inf"), torch.finfo(x.dtype).max)
+
+
+ def get_default_device() -> torch.device:
+     if torch.cuda.is_available() and torch.cuda.is_initialized():
+         return torch.device("cuda")
+     else:
+         return torch.device("cpu")
+
+
+ def barrier() -> None:
+     if is_distributed():
+         dist.barrier()
+
+
+ def peak_gpu_memory(reset: bool = False) -> Optional[float]:
+     """
+     Get the peak GPU memory usage in MB across all ranks.
+     Only rank 0 will get the final result.
+     """
+     if not torch.cuda.is_available():
+         return None
+
+     device = torch.device("cuda")
+     peak_mb = torch.cuda.max_memory_allocated(device) / 1000000
+     if is_distributed():
+         peak_mb_tensor = torch.tensor(peak_mb, device=device)
+         dist.reduce(peak_mb_tensor, 0, dist.ReduceOp.MAX)
+         peak_mb = peak_mb_tensor.item()
+
+     if reset:
+         # Reset peak stats.
+         torch.cuda.reset_max_memory_allocated(device)
+
+     return peak_mb
+
+
+ V = TypeVar("V", bool, int, float)
+
+
+ def synchronize_value(value: V, device: torch.device) -> V:
+     if dist.is_available() and dist.is_initialized():
+         value_tensor = torch.tensor(value, device=device)
+         dist.broadcast(value_tensor, 0)
+         return value_tensor.item()  # type: ignore
+     else:
+         return value
+
+
+ def synchronize_flag(flag: bool, device: torch.device) -> bool:
+     return synchronize_value(flag, device)
+
+
+ def gc_cuda():
+     gc.collect()
+     if torch.cuda.is_available():
+         torch.cuda.empty_cache()
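For reference, a minimal sketch (not part of the uploaded file) of how these helpers might be wired into a distributed training entry point. It assumes the processes are launched with torchrun, which sets the RANK, LOCAL_RANK, and LOCAL_WORLD_SIZE environment variables that the rank helpers read; the main() function, batch shape, and seed below are illustrative only.

# Hypothetical usage of torch_util.py under torchrun (nproc_per_node GPUs per node).
import torch
import torch.distributed as dist

from torch_util import (
    seed_all,
    get_default_device,
    get_global_rank,
    get_local_rank,
    move_to_device,
    barrier,
    peak_gpu_memory,
    synchronize_flag,
    gc_cuda,
)


def main():
    # torchrun is assumed to have set RANK / LOCAL_RANK / LOCAL_WORLD_SIZE / MASTER_ADDR.
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(get_local_rank())
    seed_all(42)  # identical RNG state on every rank

    device = get_default_device()  # "cuda" once CUDA is available and initialized, else "cpu"
    batch = {"input_ids": torch.randint(0, 100, (8, 128)), "masks": [torch.ones(8, 128)]}
    batch = move_to_device(batch, device)  # recurses through dicts, lists, and tuples

    # ... forward/backward/optimizer step would go here ...

    barrier()  # wait for every rank before collecting stats
    peak = peak_gpu_memory(reset=True)  # reduced with MAX across ranks; only rank 0 gets the result
    if get_global_rank() == 0 and peak is not None:
        print(f"peak GPU memory: {peak:.1f} MB")

    # Broadcast rank 0's decision so all ranks exit the loop together.
    stop = synchronize_flag(True, device)
    if stop:
        gc_cuda()
        dist.destroy_process_group()


if __name__ == "__main__":
    main()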