azzzacs committed
Commit 36ab767 · verified · 1 Parent(s): 585e17b

Upload occupy.py with huggingface_hub

Files changed (1): occupy.py +148 -0
occupy.py ADDED
# -*- coding: utf-8 -*-
import torch
import time
import datetime
import argparse


def occupy_gpu_memory(gpu_id, fraction, extra_reserve_gb):
    """Try to allocate memory on a single GPU, retrying on failure."""
    try:
        torch.cuda.set_device(gpu_id)
        prop = torch.cuda.get_device_properties(gpu_id)
        total_memory = prop.total_memory
        total_gb = total_memory / 1024**3

        target_reserve_bytes = int(total_memory * fraction) - int(extra_reserve_gb * 1024**3)

        print("GPU {} ({}): total={:.2f} GB, initial target ~= {:.2f} GB".format(
            gpu_id, prop.name, total_gb, target_reserve_bytes / 1024**3))

        if target_reserve_bytes <= 0:
            print("GPU {}: Target occupation is non-positive, skipping.".format(gpu_id))
            return None

        # Try to allocate; on OOM, shrink the target and retry (at most 5 attempts).
        for attempt in range(5):
            try:
                num_elems = target_reserve_bytes // 4  # each float32 element occupies 4 bytes
                if num_elems <= 0:
                    return None

                tensor = torch.randn(num_elems, dtype=torch.float32, device="cuda:{}".format(gpu_id))
                torch.cuda.synchronize(gpu_id)
                allocated_gb = tensor.element_size() * tensor.numel() / 1024**3
                print("GPU {}: Successfully occupied {:.2f} GB.".format(gpu_id, allocated_gb))
                return tensor
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    print("GPU {}: OOM on attempt {}. Reducing target by 256 MB and retrying...".format(
                        gpu_id, attempt + 1))
                    target_reserve_bytes -= 256 * 1024 * 1024
                else:
                    print("GPU {}: A non-OOM runtime error occurred: {}".format(gpu_id, e))
                    return None

        print("GPU {}: Failed to allocate memory after all attempts.".format(gpu_id))
        return None

    except Exception as e:
        print("An unexpected error occurred while processing GPU {}: {}".format(gpu_id, e))
        return None
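
# Worked example for occupy_gpu_memory (illustrative numbers, not from the commit):
# on an 80 GB card with fraction=0.95 and extra_reserve_gb=2, the initial target is
# 80 * 0.95 - 2 = 74 GB, i.e. one float32 tensor of roughly 74 GB / 4 B ~= 19.9e9 elements.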

def parse_gpu_selection(gpu_arg, max_gpus):
    """Parse a GPU selection string such as '0,1' or 'cuda:0,cuda:1'."""
    if gpu_arg is None:
        return list(range(max_gpus))

    selected = []
    for token in gpu_arg.split(","):
        token = token.strip()
        if not token:
            continue
        if token.lower().startswith("cuda:"):
            token = token.split(":", 1)[1]
        try:
            idx = int(token)
        except ValueError:
            raise ValueError("Invalid GPU identifier '{}'.".format(token))
        if idx < 0 or idx >= max_gpus:
            raise ValueError("GPU index {} is out of range [0, {}).".format(idx, max_gpus))
        if idx not in selected:
            selected.append(idx)

    if not selected:
        raise ValueError("No valid GPU identifiers were provided.")
    return selected
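
# Examples (assuming max_gpus=4): parse_gpu_selection("0,1", 4) -> [0, 1];
# parse_gpu_selection("cuda:0,cuda:2", 4) -> [0, 2]; parse_gpu_selection(None, 4) -> [0, 1, 2, 3].
# Duplicates are dropped and first-seen order is preserved.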


def main(args):
    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        raise RuntimeError("No GPU detected.")
    print("Detected {} GPUs.".format(num_gpus))

    try:
        gpu_ids = parse_gpu_selection(args.gpus, num_gpus)
    except ValueError as parse_error:
        raise RuntimeError(str(parse_error))

    gpu_label = ", ".join(["cuda:{}".format(idx) for idx in gpu_ids])
    print("Using GPUs: {}".format(gpu_label))

    # --- Stage 1: occupy memory ---
    print("\n--- Stage 1: Allocating memory on all GPUs ---")
    # Hold the returned tensors so the allocations stay alive for the process lifetime.
    tensors = [occupy_gpu_memory(gpu_id, args.fraction, args.extra_reserve_gb) for gpu_id in gpu_ids]

    # --- Stage 2: keep-alive compute ---
    print("\n--- Stage 2: Starting keep-alive compute task ---")
    compute_tensors = []
    for gpu_id in gpu_ids:
        try:
            torch.cuda.set_device(gpu_id)
            compute_tensors.append(torch.randn(args.matrix_size, args.matrix_size, device="cuda:{}".format(gpu_id)))
        except Exception as e:
            print("GPU {}: Failed to create keep-alive tensor: {}".format(gpu_id, e))
            compute_tensors.append(None)

    print("Holding memory with a compute duty cycle of {}s work / {}s sleep.".format(
        args.compute_sec, args.sleep_sec))
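    # With the defaults (compute_sec=5.0, sleep_sec=3.0) the target duty cycle is about
    # 5 / (5 + 3) ~= 62% busy time, ignoring kernel-launch and synchronization overhead.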
    print("Press Ctrl+C to exit.")

    try:
        while True:
            start_burst_time = time.time()

            # Compute phase: keep matmuls running until the burst budget is used up.
            while time.time() - start_burst_time < args.compute_sec:
                for idx, gpu_id in enumerate(gpu_ids):
                    if compute_tensors[idx] is not None:
                        try:
                            torch.cuda.set_device(gpu_id)
                            compute_tensors[idx] = torch.matmul(compute_tensors[idx], compute_tensors[idx].T)
                            compute_tensors[idx] = compute_tensors[idx] / (compute_tensors[idx].norm() + 1e-6)
                        except Exception as e:
                            print("Error during keep-alive on GPU {}: {}".format(gpu_id, e))
                            compute_tensors[idx] = None  # stop computing on this GPU after an error

            # Synchronize, then report how long the burst actually took.
            for idx, gpu_id in enumerate(gpu_ids):
                if compute_tensors[idx] is not None:
                    torch.cuda.synchronize(gpu_id)
            actual_compute_time = time.time() - start_burst_time
            print("[{}] Compute burst finished in {:.2f}s.".format(
                datetime.datetime.now(), actual_compute_time), flush=True)

            # Sleep phase
            time.sleep(args.sleep_sec)

    except KeyboardInterrupt:
        print("\nExiting and releasing memory...")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Occupy GPU memory and maintain a specified utilization duty cycle.")
    parser.add_argument("--fraction", type=float, default=0.95, help="Fraction of total GPU memory to try to occupy.")
    parser.add_argument("--extra_reserve_gb", type=int, default=2, help="Additional memory to reserve in GB.")
    parser.add_argument("--matrix_size", type=int, default=4096, help="Matrix size for keep-alive computation (e.g., 2048, 4096).")
    parser.add_argument("--compute_sec", type=float, default=5.0, help="Target duration (in seconds) for the computation burst.")
    parser.add_argument("--sleep_sec", type=float, default=3.0, help="Duration (in seconds) to sleep after each burst.")
    parser.add_argument("--gpus", type=str, default=None, help="Comma-separated GPU ids to occupy, e.g. '0,1' or 'cuda:0,cuda:1'. Default uses all GPUs.")

    args = parser.parse_args()
    main(args)
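
A minimal usage sketch (assuming the file is saved as occupy.py and PyTorch with CUDA support is installed; the flag values below are illustrative, and every flag is optional given the defaults above):

    python occupy.py --gpus 0,1 --fraction 0.90 --compute_sec 5 --sleep_sec 3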