ZTWHHH commited on
Commit
acd09c6
·
verified ·
1 Parent(s): be382da

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/blackwhite.cpython-310.pyc +0 -0
  3. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/even_size.cpython-310.pyc +0 -0
  4. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/fadeout.cpython-310.pyc +0 -0
  5. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/freeze_region.cpython-310.pyc +0 -0
  6. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/gamma_corr.cpython-310.pyc +0 -0
  7. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/invert_colors.cpython-310.pyc +0 -0
  8. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/loop.cpython-310.pyc +0 -0
  9. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/lum_contrast.cpython-310.pyc +0 -0
  10. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/make_loopable.cpython-310.pyc +0 -0
  11. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mask_color.cpython-310.pyc +0 -0
  12. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mask_or.cpython-310.pyc +0 -0
  13. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mirror_x.cpython-310.pyc +0 -0
  14. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mirror_y.cpython-310.pyc +0 -0
  15. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/resize.cpython-310.pyc +0 -0
  16. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/rotate.cpython-310.pyc +0 -0
  17. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/supersample.cpython-310.pyc +0 -0
  18. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/time_mirror.cpython-310.pyc +0 -0
  19. videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/time_symmetrize.cpython-310.pyc +0 -0
  20. videollama2/lib/python3.10/site-packages/moviepy/video/fx/all/__init__.py +17 -0
  21. videollama2/lib/python3.10/site-packages/moviepy/video/fx/all/__pycache__/__init__.cpython-310.pyc +0 -0
  22. videollama2/lib/python3.10/site-packages/moviepy/video/fx/freeze.py +29 -0
  23. videollama2/lib/python3.10/site-packages/moviepy/video/fx/freeze_region.py +57 -0
  24. videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_color.py +34 -0
  25. videollama2/lib/python3.10/site-packages/moviepy/video/fx/resize.py +165 -0
  26. videollama2/lib/python3.10/site-packages/moviepy/video/fx/time_mirror.py +13 -0
  27. vllm/lib/python3.10/site-packages/cupy/_core/_routines_sorting.cpython-310-x86_64-linux-gnu.so +3 -0
  28. vllm/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc +0 -0
  29. vllm/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/control_plane.cpython-310.pyc +0 -0
  30. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py +0 -0
  31. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/__pycache__/__init__.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py +41 -0
  33. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc +0 -0
  35. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/health_check_server.cpython-310.pyc +0 -0
  36. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py +942 -0
  38. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/health_check_server.py +65 -0
  39. vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py +410 -0
  40. vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py +170 -0
  41. vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc +0 -0
  42. vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc +0 -0
  43. vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py +114 -0
  44. vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py +22 -0
  45. vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py +164 -0
  46. vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py +216 -0
  49. vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py +166 -0
.gitattributes CHANGED
@@ -1826,3 +1826,5 @@ parrot/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-l
1826
  parrot/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1827
  parrot/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1828
  videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
1826
  parrot/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1827
  parrot/lib/python3.10/site-packages/scipy/optimize/_trlib/_trlib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1828
  videollama2/lib/python3.10/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1829
+ vllm/lib/python3.10/site-packages/cupy/_core/_routines_sorting.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1830
+ vllm/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/blackwhite.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/even_size.cpython-310.pyc ADDED
Binary file (895 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/fadeout.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/freeze_region.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/gamma_corr.cpython-310.pyc ADDED
Binary file (521 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/invert_colors.cpython-310.pyc ADDED
Binary file (633 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/loop.cpython-310.pyc ADDED
Binary file (990 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/lum_contrast.cpython-310.pyc ADDED
Binary file (661 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/make_loopable.cpython-310.pyc ADDED
Binary file (773 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mask_color.cpython-310.pyc ADDED
Binary file (1.3 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mask_or.cpython-310.pyc ADDED
Binary file (909 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mirror_x.cpython-310.pyc ADDED
Binary file (522 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/mirror_y.cpython-310.pyc ADDED
Binary file (510 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/resize.cpython-310.pyc ADDED
Binary file (4.12 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/rotate.cpython-310.pyc ADDED
Binary file (2.33 kB). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/supersample.cpython-310.pyc ADDED
Binary file (922 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/time_mirror.cpython-310.pyc ADDED
Binary file (768 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/__pycache__/time_symmetrize.cpython-310.pyc ADDED
Binary file (837 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/all/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Loads all the fx !
3
+ Usage:
4
+ import moviepy.video.fx.all as vfx
5
+ clip = vfx.resize(some_clip, width=400)
6
+ clip = vfx.mirror_x(some_clip)
7
+ """
8
+
9
+ import pkgutil
10
+
11
+ import moviepy.video.fx as fx
12
+
13
+ __all__ = [name for _, name, _ in pkgutil.iter_modules(
14
+ fx.__path__) if name != "all"]
15
+
16
+ for name in __all__:
17
+ exec("from ..%s import %s" % (name, name))
videollama2/lib/python3.10/site-packages/moviepy/video/fx/all/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (656 Bytes). View file
 
videollama2/lib/python3.10/site-packages/moviepy/video/fx/freeze.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from moviepy.decorators import requires_duration
2
+ from moviepy.video.compositing.concatenate import concatenate_videoclips
3
+ from moviepy.video.VideoClip import ImageClip
4
+
5
+
6
+ @requires_duration
7
+ def freeze(clip, t=0, freeze_duration=None, total_duration=None,
8
+ padding_end=0):
9
+ """ Momentarily freeze the clip at time t.
10
+
11
+ Set `t='end'` to freeze the clip at the end (actually it will freeze on the
12
+ frame at time clip.duration - padding_end seconds).
13
+ With ``duration``you can specify the duration of the freeze.
14
+ With ``total_duration`` you can specify the total duration of
15
+ the clip and the freeze (i.e. the duration of the freeze is
16
+ automatically calculated). One of them must be provided.
17
+ """
18
+
19
+ if t=='end':
20
+ t = clip.duration - padding_end
21
+
22
+ if freeze_duration is None:
23
+ freeze_duration = total_duration - clip.duration
24
+
25
+ before = [clip.subclip(0,t)] if (t!=0) else []
26
+ freeze = [clip.to_ImageClip(t).set_duration(freeze_duration)]
27
+ after = [clip.subclip(t)] if (t !=clip.duration) else []
28
+ return concatenate_videoclips(before + freeze + after)
29
+
videollama2/lib/python3.10/site-packages/moviepy/video/fx/freeze_region.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from moviepy.decorators import apply_to_mask
2
+ from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
3
+
4
+ from .crop import crop
5
+
6
+
7
+ #@apply_to_mask
8
+ def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
9
+ """ Freezes one region of the clip while the rest remains animated.
10
+
11
+ You can choose one of three methods by providing either `region`,
12
+ `outside_region`, or `mask`.
13
+
14
+ Parameters
15
+ -----------
16
+
17
+ t
18
+ Time at which to freeze the freezed region.
19
+
20
+ region
21
+ A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
22
+ which will be freezed. You can provide outside_region or mask instead.
23
+
24
+ outside_region
25
+ A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
26
+ which will be the only non-freezed region.
27
+
28
+ mask
29
+ If not None, will overlay a freezed version of the clip on the current clip,
30
+ with the provided mask. In other words, the "visible" pixels in the mask
31
+ indicate the freezed region in the final picture.
32
+
33
+ """
34
+
35
+ if region is not None:
36
+
37
+ x1, y1, x2, y2 = region
38
+ freeze = (clip.fx(crop, *region)
39
+ .to_ImageClip(t=t)
40
+ .set_duration(clip.duration)
41
+ .set_position((x1,y1)))
42
+ return CompositeVideoClip([clip, freeze])
43
+
44
+ elif outside_region is not None:
45
+
46
+ x1, y1, x2, y2 = outside_region
47
+ animated_region = (clip.fx(crop, *outside_region)
48
+ .set_position((x1,y1)))
49
+ freeze = (clip.to_ImageClip(t=t)
50
+ .set_duration(clip.duration))
51
+ return CompositeVideoClip([freeze, animated_region])
52
+
53
+ elif mask is not None:
54
+ freeze = (clip.to_ImageClip(t=t)
55
+ .set_duration(clip.duration)
56
+ .set_mask(mask))
57
+ return CompositeVideoClip([clip, freeze])
videollama2/lib/python3.10/site-packages/moviepy/video/fx/mask_color.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
+ def mask_color(clip, color=None, thr=0, s=1):
5
+ """ Returns a new clip with a mask for transparency where the original
6
+ clip is of the given color.
7
+
8
+ You can also have a "progressive" mask by specifying a non-nul distance
9
+ threshold thr. In this case, if the distance between a pixel and the given
10
+ color is d, the transparency will be
11
+
12
+ d**s / (thr**s + d**s)
13
+
14
+ which is 1 when d>>thr and 0 for d<<thr, the stiffness of the effect being
15
+ parametrized by s
16
+ """
17
+ if color is None:
18
+ color = [0,0,0]
19
+
20
+ color = np.array(color)
21
+
22
+ def hill(x):
23
+ if thr:
24
+ return x**s / (thr**s + x**s)
25
+ else:
26
+ return 1.0 * (x != 0)
27
+
28
+ def flim(im):
29
+ return hill(np.sqrt(((im-color)**2).sum(axis=2)))
30
+
31
+ mask = clip.fl_image(flim)
32
+ mask.ismask= True
33
+ newclip = clip.set_mask(mask)
34
+ return newclip
videollama2/lib/python3.10/site-packages/moviepy/video/fx/resize.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ resize_possible = True
2
+
3
+ try:
4
+ # TRY USING OpenCV AS RESIZER
5
+ #raise ImportError #debugging
6
+ import cv2
7
+ import numpy as np
8
+ def resizer (pic, newsize):
9
+ lx, ly = int(newsize[0]), int(newsize[1])
10
+ if lx > pic.shape[1] or ly > pic.shape[0]:
11
+ # For upsizing use linear for good quality & decent speed
12
+ interpolation = cv2.INTER_LINEAR
13
+ else:
14
+ # For dowsizing use area to prevent aliasing
15
+ interpolation = cv2.INTER_AREA
16
+ return cv2.resize(+pic.astype('uint8'), (lx, ly),
17
+ interpolation=interpolation)
18
+
19
+ resizer.origin = "cv2"
20
+
21
+ except ImportError:
22
+
23
+
24
+ try:
25
+ # TRY USING PIL/PILLOW AS RESIZER
26
+ from PIL import Image
27
+ import numpy as np
28
+ def resizer(pic, newsize):
29
+ newsize = list(map(int, newsize))[::-1]
30
+ shape = pic.shape
31
+ if len(shape)==3:
32
+ newshape = (newsize[0],newsize[1], shape[2] )
33
+ else:
34
+ newshape = (newsize[0],newsize[1])
35
+
36
+ pilim = Image.fromarray(pic)
37
+ resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
38
+ #arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
39
+ #arr.reshape(newshape)
40
+ return np.array(resized_pil)
41
+
42
+ resizer.origin = "PIL"
43
+
44
+ except ImportError:
45
+ # TRY USING SCIPY AS RESIZER
46
+ try:
47
+ from scipy.misc import imresize
48
+ resizer = lambda pic, newsize : imresize(pic,
49
+ map(int, newsize[::-1]))
50
+ resizer.origin = "Scipy"
51
+
52
+ except ImportError:
53
+ resize_possible = False
54
+
55
+
56
+
57
+
58
+ from moviepy.decorators import apply_to_mask
59
+
60
+
61
+ def resize(clip, newsize=None, height=None, width=None, apply_to_mask=True):
62
+ """
63
+ Returns a video clip that is a resized version of the clip.
64
+
65
+ Parameters
66
+ ------------
67
+
68
+ newsize:
69
+ Can be either
70
+ - ``(width,height)`` in pixels or a float representing
71
+ - A scaling factor, like 0.5
72
+ - A function of time returning one of these.
73
+
74
+ width:
75
+ width of the new clip in pixel. The height is then computed so
76
+ that the width/height ratio is conserved.
77
+
78
+ height:
79
+ height of the new clip in pixel. The width is then computed so
80
+ that the width/height ratio is conserved.
81
+
82
+ Examples
83
+ ----------
84
+
85
+ >>> myClip.resize( (460,720) ) # New resolution: (460,720)
86
+ >>> myClip.resize(0.6) # width and heigth multiplied by 0.6
87
+ >>> myClip.resize(width=800) # height computed automatically.
88
+ >>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
89
+
90
+ """
91
+
92
+ w, h = clip.size
93
+
94
+ if newsize is not None:
95
+
96
+ def trans_newsize(ns):
97
+
98
+ if isinstance(ns, (int, float)):
99
+ return [ns * w, ns * h]
100
+ else:
101
+ return ns
102
+
103
+ if hasattr(newsize, "__call__"):
104
+
105
+ newsize2 = lambda t : trans_newsize(newsize(t))
106
+
107
+ if clip.ismask:
108
+
109
+ fun = lambda gf,t: (1.0*resizer((255 * gf(t)).astype('uint8'),
110
+ newsize2(t))/255)
111
+ else:
112
+
113
+ fun = lambda gf,t: resizer(gf(t).astype('uint8'),
114
+ newsize2(t))
115
+
116
+ return clip.fl(fun, keep_duration=True,
117
+ apply_to= (["mask"] if apply_to_mask else []))
118
+
119
+ else:
120
+
121
+ newsize = trans_newsize(newsize)
122
+
123
+
124
+ elif height is not None:
125
+
126
+ if hasattr(height, "__call__"):
127
+ fun = lambda t : 1.0*int(height(t))/h
128
+ return resize(clip, fun)
129
+
130
+
131
+ else:
132
+
133
+ newsize = [w * height / h, height]
134
+
135
+ elif width is not None:
136
+
137
+ if hasattr(width, "__call__"):
138
+ fun = lambda t : 1.0*width(t)/w
139
+ return resize(clip, fun)
140
+
141
+ newsize = [width, h * width / w]
142
+
143
+
144
+ # From here, the resizing is constant (not a function of time), size=newsize
145
+
146
+ if clip.ismask:
147
+ fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'), newsize)/255.0
148
+
149
+ else:
150
+ fl = lambda pic: resizer(pic.astype('uint8'), newsize)
151
+
152
+ newclip = clip.fl_image(fl)
153
+
154
+ if apply_to_mask and clip.mask is not None:
155
+ newclip.mask = resize(clip.mask, newsize, apply_to_mask=False)
156
+
157
+ return newclip
158
+
159
+
160
+ if not resize_possible:
161
+
162
+ doc = resize.__doc__
163
+ def resize(clip, newsize=None, height=None, width=None):
164
+ raise ImportError("fx resize needs OpenCV or Scipy or PIL")
165
+ resize.__doc__ = doc
videollama2/lib/python3.10/site-packages/moviepy/video/fx/time_mirror.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from moviepy.decorators import apply_to_audio, apply_to_mask, requires_duration
2
+
3
+
4
+ @requires_duration
5
+ @apply_to_mask
6
+ @apply_to_audio
7
+ def time_mirror(self):
8
+ """
9
+ Returns a clip that plays the current clip backwards.
10
+ The clip must have its ``duration`` attribute set.
11
+ The same effect is applied to the clip's audio and mask if any.
12
+ """
13
+ return self.fl_time(lambda t: self.duration - t, keep_duration=True)
vllm/lib/python3.10/site-packages/cupy/_core/_routines_sorting.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:847d5aaca69b108fc48141178cb6827097ed2862bfe091c35ee23521c61b4c5d
3
+ size 699944
vllm/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.61 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/__pycache__/control_plane.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__init__.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+ The elastic agent is the control plane of torchelastic.
11
+
12
+ It is a process that launches and manages underlying worker processes.
13
+ The agent is responsible for:
14
+
15
+ 1. Working with distributed torch: the workers are started with all the
16
+ necessary information to successfully and trivially call
17
+ ``torch.distributed.init_process_group()``.
18
+
19
+ 2. Fault tolerance: monitors workers and upon detecting worker failures
20
+ or unhealthiness, tears down all workers and restarts everyone.
21
+
22
+ 3. Elasticity: Reacts to membership changes and restarts workers with the new
23
+ members.
24
+
25
+ The simplest agents are deployed per node and works with local processes.
26
+ A more advanced agent can launch and manage workers remotely. Agents can
27
+ be completely decentralized, making decisions based on the workers it manages.
28
+ Or can be coordinated, communicating to other agents (that manage workers
29
+ in the same job) to make a collective decision.
30
+ """
31
+
32
+ from .api import ( # noqa: F401
33
+ ElasticAgent,
34
+ RunResult,
35
+ SimpleElasticAgent,
36
+ Worker,
37
+ WorkerGroup,
38
+ WorkerSpec,
39
+ WorkerState,
40
+ )
41
+ from .local_elastic_agent import TORCHELASTIC_ENABLE_FILE_TIMER, TORCHELASTIC_TIMER_FILE
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/api.cpython-310.pyc ADDED
Binary file (30.5 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/health_check_server.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/__pycache__/local_elastic_agent.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/api.py ADDED
@@ -0,0 +1,942 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import abc
10
+ import json
11
+ import os
12
+ import signal
13
+ import socket
14
+ import time
15
+ import traceback
16
+ import warnings
17
+ from collections import defaultdict
18
+ from contextlib import contextmanager
19
+ from dataclasses import dataclass, field
20
+ from enum import Enum
21
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
22
+
23
+ import torch.distributed.elastic.rendezvous as rdzv
24
+ import torch.distributed.elastic.utils.store as store_util
25
+ from torch.distributed.elastic.events import Event, EventSource, record
26
+ from torch.distributed.elastic.metrics import prof, put_metric
27
+ from torch.distributed.elastic.multiprocessing import ProcessFailure, SignalException
28
+ from torch.distributed.elastic.rendezvous import RendezvousGracefulExitError
29
+ from torch.distributed.elastic.utils.logging import get_logger
30
+
31
+
32
+ __all__ = [
33
+ "WorkerSpec",
34
+ "Worker",
35
+ "WorkerState",
36
+ "WorkerGroup",
37
+ "RunResult",
38
+ "ElasticAgent",
39
+ "SimpleElasticAgent",
40
+ ]
41
+ _TERMINAL_STATE_SYNC_ID = "torchelastic/agent/terminal_state"
42
+
43
+ DEFAULT_ROLE = "default"
44
+ logger = get_logger(__name__)
45
+
46
+
47
+ @dataclass
48
+ class WorkerSpec:
49
+ """Blueprint information about a particular type of worker.
50
+
51
+ For a given role, there must only exist a single worker spec.
52
+ Worker spec is expected to be homogeneous across all nodes (machine),
53
+ that is each node runs the same number of workers for a particular spec.
54
+
55
+ Args:
56
+ role: user-defined role for the workers with this spec
57
+ local_world_size: number local workers to run
58
+ fn: (deprecated use entrypoint instead)
59
+ entrypoint: worker function or command
60
+ args: arguments to pass to ``entrypoint``
61
+ rdzv_handler: handles rdzv for this set of workers
62
+ max_restarts: number of max retries for the workers
63
+ monitor_interval: monitor status of workers every ``n`` seconds
64
+ master_port: fixed port to run the c10d store on rank 0
65
+ if not specified then will chose a random free port
66
+ master_addr: fixed master_addr to run the c10d store on rank 0
67
+ if not specified then will chose hostname on agent rank 0
68
+ redirects: redirect std streams to a file,
69
+ selectively redirect for a particular
70
+ local rank by passing a map
71
+ tee: tees the specified std stream(s) to console + file,
72
+ selectively tee for a particular local rank by passing a map,
73
+ takes precedence over ``redirects`` settings.
74
+
75
+ """
76
+
77
+ role: str
78
+ local_world_size: int
79
+ rdzv_handler: rdzv.RendezvousHandler
80
+ fn: Optional[Callable] = None
81
+ # TODO @kiuk - make entrypoint a required field
82
+ entrypoint: Union[Callable, str, None] = None
83
+ args: Tuple = ()
84
+ max_restarts: int = 3
85
+ monitor_interval: float = 0.1
86
+ master_port: Optional[int] = None
87
+ master_addr: Optional[str] = None
88
+ local_addr: Optional[str] = None
89
+
90
+ def __post_init__(self):
91
+ assert self.local_world_size > 0
92
+ assert self.monitor_interval > 0
93
+
94
+ if self.fn:
95
+ warnings.warn(
96
+ "WorkerSpec.fn will be deprecated,"
97
+ " please use WorkerSpec.entrypoint instead",
98
+ category=DeprecationWarning,
99
+ )
100
+ self.entrypoint = self.fn
101
+ assert self.entrypoint
102
+
103
+ def get_entrypoint_name(self):
104
+ """Get the entry point name.
105
+
106
+ If the entrypoint is a function (e.g. ``Callable``) returns its ``__qualname__``
107
+ else if the entrypoint is a binary (e.g. ``str``), returns the binary name.
108
+ """
109
+ if isinstance(self.entrypoint, str):
110
+ return os.path.basename(self.entrypoint)
111
+ else:
112
+ assert self.entrypoint is not None
113
+ return self.entrypoint.__qualname__
114
+
115
+
116
+ class Worker:
117
+ """A worker instance.
118
+
119
+ Contrast this with ``WorkerSpec`` that represents the specifications of a
120
+ worker. A ``Worker`` is created from a ``WorkerSpec``. A ``Worker`` is to
121
+ a ``WorkerSpec`` as an object is to a class.
122
+
123
+ The ``id`` of the worker is interpreted
124
+ by the specific implementation of ``ElasticAgent``. For a local
125
+ agent, it could be the ``pid (int)`` of the worker, for a remote
126
+ agent it could be encoded as ``host:port (string)``.
127
+
128
+ Args:
129
+ id (Any): uniquely identifies a worker (interpreted by the agent)
130
+ local_rank (int): local rank of the worker
131
+ global_rank (int): global rank of the worker
132
+ role_rank (int): rank of the worker across all workers that have the same role
133
+ world_size (int): number of workers (globally)
134
+ role_world_size (int): number of workers that have the same role
135
+ """
136
+
137
+ __slots__ = [
138
+ "id",
139
+ "local_rank",
140
+ "global_rank",
141
+ "role_rank",
142
+ "world_size",
143
+ "role_world_size",
144
+ ]
145
+
146
+ def __init__(
147
+ self,
148
+ local_rank: int,
149
+ global_rank: int = -1,
150
+ role_rank: int = -1,
151
+ world_size: int = -1,
152
+ role_world_size: int = -1,
153
+ ):
154
+ # unique identifier for this worker
155
+ self.id: Any = None
156
+
157
+ # rank of the worker among workers with the same role being monitored
158
+ # by the same ``agent`` instance.
159
+ self.local_rank: int = local_rank
160
+
161
+ # rank of the worker among all the workers across all roles
162
+ # across all ``agent`` instances.
163
+ # Global rank is not stable between re-rendezvous.
164
+ self.global_rank: int = global_rank
165
+
166
+ # rank of the worker among all the workers with the same role
167
+ # across all ``agent`` instances.
168
+ # Role rank is not stable between re-rendezvous.
169
+ self.role_rank: int = role_rank
170
+
171
+ # total number of workers (globally). Due to elasticity
172
+ # the world size may change between re-rendezvous.
173
+ self.world_size: int = world_size
174
+
175
+ # total number of workers that share the same role. Due to elasticity
176
+ # the role world size may change between re-rendezvous.
177
+ self.role_world_size: int = role_world_size
178
+
179
+ def __str__(self):
180
+ return (
181
+ f"local_rank={self.local_rank},global_rank={self.global_rank}"
182
+ f",role_rank={self.role_rank},world_size={self.world_size}"
183
+ f",role_world_size={self.role_world_size}"
184
+ )
185
+
186
+ def __repr__(self):
187
+ return str(self)
188
+
189
+
190
class WorkerState(str, Enum):
    """A state of the ``WorkerGroup``.

    Workers in a worker group change state as a unit; a single failed
    worker marks the entire set as failed::

      UNKNOWN - agent lost track of worker group state, unrecoverable
      INIT - worker group object created not yet started
      HEALTHY - workers running and healthy
      UNHEALTHY - workers running and unhealthy
      STOPPED - workers stopped (interrupted) by the agent
      SUCCEEDED - workers finished running (exit 0)
      FAILED - workers failed to successfully finish (exit !0)

    A group begins in ``INIT``, moves through ``HEALTHY``/``UNHEALTHY``
    while running, and ends in a terminal ``SUCCEEDED`` or ``FAILED`` state.

    The agent may temporarily interrupt a group and place it in ``STOPPED``,
    scheduling a restart in the near future. Typical reasons for entering
    ``STOPPED`` are:

    1. Worker group failure|unhealthy observed
    2. Membership change detected

    When an action on the group (start, stop, rdzv, retry, etc.) fails and
    is only partially applied — typically because of an uncaught/unhandled
    exception during a state change — the group lands in ``UNKNOWN``. The
    agent is not expected to recover a group in ``UNKNOWN`` state and is
    better off self-terminating so the job manager can retry the node.
    """

    UNKNOWN = "UNKNOWN"
    INIT = "INIT"
    HEALTHY = "HEALTHY"
    UNHEALTHY = "UNHEALTHY"
    STOPPED = "STOPPED"
    SUCCEEDED = "SUCCEEDED"
    FAILED = "FAILED"

    @staticmethod
    def is_running(state: "WorkerState") -> bool:
        """Return ``True`` if ``state`` means the worker processes still exist.

        A running worker is not necessarily healthy: both ``HEALTHY`` and
        ``UNHEALTHY`` count as running.
        """
        running_states = (WorkerState.HEALTHY, WorkerState.UNHEALTHY)
        return state in running_states
242
+
243
+
244
class WorkerGroup:
    """The set of ``Worker`` instances an ``ElasticAgent`` manages for one ``WorkerSpec``.

    Whether the group spans workers across multiple hosts depends on the
    concrete agent implementation.
    """

    __slots__ = [
        "spec",
        "workers",
        "store",
        "group_rank",
        "group_world_size",
        "state",
        "master_addr",
        "master_port",
    ]

    def __init__(self, spec: WorkerSpec):
        self.spec = spec
        self.workers = [
            Worker(local_rank=rank) for rank in range(spec.local_world_size)
        ]

        # The fields below are only known once rendezvous has completed.
        self.store = None
        self.group_rank = None
        self.group_world_size = None
        self.master_addr = None
        self.master_port = None

        self.state = WorkerState.INIT
274
+
275
+
276
+ class _RoleInstanceInfo:
277
+ """The class is used by the agent to exchange the information with other agents.
278
+
279
+ The information is used to determine the rank of the workers that agent
280
+ manages in heterogeneous environments, where different agents can have
281
+ different number of workers.
282
+ """
283
+
284
+ __slots__ = ["role", "rank", "local_world_size"]
285
+
286
+ def __init__(self, role: str, rank: int, local_world_size: int):
287
+ r"""Initialize the agent class instance.
288
+
289
+ Args:
290
+ role (str): user-defined role for the workers with this spec
291
+ rank (int): the rank of the agent
292
+ local_world_size (int): number of local workers to run
293
+ """
294
+ self.role = role
295
+ self.rank = rank
296
+ self.local_world_size = local_world_size
297
+
298
+ def serialize(self) -> bytes:
299
+ dict_data = {
300
+ "role": self.role,
301
+ "rank": self.rank,
302
+ "local_world_size": self.local_world_size,
303
+ }
304
+ return json.dumps(dict_data).encode(encoding="UTF-8")
305
+
306
+ @staticmethod
307
+ def deserialize(data: bytes):
308
+ dict_data = json.loads(data.decode(encoding="UTF-8"))
309
+ return _RoleInstanceInfo(
310
+ dict_data["role"], dict_data["rank"], dict_data["local_world_size"]
311
+ )
312
+
313
+ @staticmethod
314
+ def compare(obj1, obj2) -> int:
315
+ if obj1.role == obj2.role:
316
+ return obj1.rank - obj2.rank
317
+ elif obj1.role > obj2.role:
318
+ return 1
319
+ else:
320
+ return -1
321
+
322
+ @staticmethod
323
+ def find_role_boundaries(roles_infos: List, role: str) -> Tuple[int, int]:
324
+ start_idx, end_idx = -1, -1
325
+ for idx, role_info in enumerate(roles_infos):
326
+ if role_info.role == role:
327
+ if start_idx == -1:
328
+ start_idx = idx
329
+ end_idx = idx
330
+ return (start_idx, end_idx)
331
+
332
+
333
@dataclass
class RunResult:
    """Outcome of a single worker-group execution.

    The run follows an "all-or-nothing" policy: it is successful if and
    only if EVERY local worker managed by this agent completed successfully.

    On success (``is_failed() == False``), ``return_values`` maps each
    worker's GLOBAL rank to its return value — e.g. ``return_values[0]``
    belongs to global rank 0.

    .. note:: ``return_values`` are only meaningful when the worker
       entrypoint is a function. Binary entrypoints have no canonical
       return value, so the mapping is meaningless and may be empty.

    On failure (``is_failed() == True``), ``failures`` maps the GLOBAL rank
    of each failed worker to its failure information.

    The keys of ``return_values`` and ``failures`` are mutually exclusive:
    a worker's final state is exactly one of succeeded or failed. Workers
    that the agent intentionally terminated under its restart policy appear
    in neither mapping.
    """

    state: WorkerState
    return_values: Dict[int, Any] = field(default_factory=dict)
    failures: Dict[int, ProcessFailure] = field(default_factory=dict)

    def is_failed(self) -> bool:
        """Return ``True`` when the group ended in the ``FAILED`` state."""
        return WorkerState.FAILED == self.state
365
+
366
+
367
+ def _get_fq_hostname() -> str:
368
+ return socket.getfqdn(socket.gethostname())
369
+
370
+
371
class ElasticAgent(abc.ABC):
    """An agent process responsible for managing one or more worker processes.

    The workers are assumed to be regular distributed PyTorch scripts; when
    the agent creates a worker process it provides the information the worker
    needs to properly initialize a torch process group.

    The deployment topology and the agent-to-worker ratio depend on the agent
    implementation and on the user's job placement preferences. For example,
    a distributed training job with 8 trainers (one per GPU) could run as:

    1. 8 x single GPU instances, one agent per instance, 1 worker per agent.
    2. 4 x double GPU instances, one agent per instance, 2 workers per agent.
    3. 2 x quad GPU instances, one agent per instance, 4 workers per agent.
    4. 1 x 8 GPU instance, one agent on it, 8 workers for that agent.

    Usage
    ::

     group_result = agent.run()
      if group_result.is_failed():
        # workers failed
        failure = group_result.failures[0]
        logger.exception("worker 0 failed with exit code : %s", failure.exit_code)
      else:
        return group_result.return_values[0] # return rank 0's results

    """

    @abc.abstractmethod
    def run(self, role: str = DEFAULT_ROLE) -> RunResult:
        """Run the agent.

        Supports retrying the worker group on failures up to ``max_restarts``.

        Returns:
            The result of the execution, containing the return values or
            failure details for each worker mapped by the worker's global rank.

        Raises:
            Exception - any other failures NOT related to worker process
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
        """Return the ``WorkerGroup`` for the given ``role``.

        The worker group is mutable, so in a multi-threaded/process
        environment its state may change. Implementors are encouraged
        (but not required) to return a defensive read-only copy.
        """
        raise NotImplementedError
431
+
432
+
433
class SimpleElasticAgent(ElasticAgent):
    """An ``ElasticAgent`` that manages one particular type of worker role.

    An ``ElasticAgent`` that manages workers (``WorkerGroup``) for a single ``WorkerSpec``
    such as one particular type of worker role.
    """

    def __init__(self, spec: WorkerSpec, exit_barrier_timeout: float = 300):
        # The single worker group this agent manages; starts in INIT state.
        self._worker_group = WorkerGroup(spec)
        # Counts down on every restart; group is declared FAILED once exhausted.
        self._remaining_restarts = self._worker_group.spec.max_restarts
        # Rendezvous store handle; populated by _rendezvous().
        self._store = None
        self._exit_barrier_timeout = exit_barrier_timeout
        # Wall-clock duration of run() in whole seconds; reported in events.
        self._total_execution_time = 0

    def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
        # Single-role agent: `role` is accepted for interface compatibility
        # but the same group is returned regardless.
        return self._worker_group

    @abc.abstractmethod
    def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
        r"""Start ``worker_group.spec.local_world_size`` number of workers.

        This is according to worker spec for the worker group.
        Returns a map of ``local_rank`` to worker ``id``.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _stop_workers(
        self, worker_group: WorkerGroup, is_restart: bool = False
    ) -> None:
        r"""Stop all workers in the given worker group.

        Implementors must deal with workers in all states defined by
        ``WorkerState``. That is, it must gracefully handle stopping
        non-existent workers, unhealthy (stuck) workers, etc.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
        r"""Check on the workers for the ``worker_group``.

        This function also returns the new state of the worker group.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _shutdown(
        self, death_sig: signal.Signals = signal.SIGTERM, is_restart: bool = False
    ) -> None:
        """Clean up any resources that were allocated during the agent's work.

        Args:
            death_sig: Signal to send to the child process, SIGTERM is default
            is_restart: presumably set when shutdown is part of a worker-group
                restart rather than a final teardown — confirm in subclasses
        """
        raise NotImplementedError

    @prof
    def _rendezvous(self, worker_group: WorkerGroup) -> None:
        r"""Run rendezvous for the workers specified by the worker spec.

        Assigns workers a new global rank and world size.
        Updates the rendezvous store for the worker group.
        """
        spec = worker_group.spec

        with self.record_duration("RENDEZVOUS"):
            rdzv_info = spec.rdzv_handler.next_rendezvous()
        store = rdzv_info.store
        group_rank = rdzv_info.rank
        group_world_size = rdzv_info.world_size

        # master_addr/master_port could be explicitly overridden
        # TODO: BC - specific to static rdzv and can be simplified further
        master_addr = spec.master_addr or rdzv_info.bootstrap_store_info.master_addr
        master_port = spec.master_port or rdzv_info.bootstrap_store_info.master_port

        self._store = store

        with self.record_duration("ASSIGN_WORKER_RANKS"):
            workers = self._assign_worker_ranks(
                store, group_rank, group_world_size, spec
            )
        # Publish the rendezvous results on the worker group.
        worker_group.workers = workers
        worker_group.store = store
        worker_group.group_rank = group_rank
        worker_group.group_world_size = group_world_size
        worker_group.master_addr = master_addr
        worker_group.master_port = master_port

        restart_count = spec.max_restarts - self._remaining_restarts

        logger.info(
            "[%(role)s] Rendezvous complete for workers. Result:\n"
            "  restart_count=%(restart_count)s\n"
            "  master_addr=%(master_addr)s\n"
            "  master_port=%(master_port)s\n"
            "  group_rank=%(group_rank)s\n"
            "  group_world_size=%(group_world_size)s\n"
            "  local_ranks=%(local_ranks)s\n"
            "  role_ranks=%(role_ranks)s\n"
            "  global_ranks=%(global_ranks)s\n"
            "  role_world_sizes=%(role_world_sizes)s\n"
            "  global_world_sizes=%(global_world_sizes)s\n",
            {
                "role": spec.role,
                "restart_count": restart_count,
                "master_addr": master_addr,
                "master_port": master_port,
                "group_rank": group_rank,
                "group_world_size": group_world_size,
                "local_ranks": [worker.local_rank for worker in workers],
                "role_ranks": [worker.role_rank for worker in workers],
                "global_ranks": [worker.global_rank for worker in workers],
                "role_world_sizes": [worker.role_world_size for worker in workers],
                "global_world_sizes": [worker.world_size for worker in workers],
            },
        )

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _assign_worker_ranks(
        self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
    ) -> List[Worker]:
        """Determine proper ranks for worker processes.

        The rank assignment is done according to the following algorithm:

        1. Each agent writes its configuration(group_rank, group_world_size
           , num_workers) to the common store.
        2. The rank 0 agent reads all the role_info from the store and
           determines each agents worker ranks.
        3. Determine the global rank: the global rank of the workers is computed
           by cumulative sum of the local_world_size for all workers in front of it.
           For efficiency reasons each worker is assigned a base global rank
           such that it's workers are in the range [base_global_rank,
           base_global_rank + local_world_size).
        4. Determine the role rank: The role rank is determined using the algorithms
           in the point 3 with the exception that the ranks are calculated with
           respect to the role name.
        5. The rank 0 agent writes the assigned ranks to the store.
        6. Each agent reads the assigned ranks from the store.

        Time complexity: each worker O(1), rank0 O(n), overall O(n)
        """

        ROLE_INFO_PREFIX = "torchelastic/role_info/"
        ASSIGNED_RANKS_PREFIX = "torchelastic/assigned_ranks/"

        agent_role_info = _RoleInstanceInfo(
            spec.role, group_rank, spec.local_world_size
        )
        store.set(f"{ROLE_INFO_PREFIX}{group_rank}", agent_role_info.serialize())

        # tcp store is collocated with rank 0 so we can use it to do extra compute to reduce overall # of operations.
        if group_rank == 0:
            role_infos_bytes = store.multi_get(
                [f"torchelastic/role_info/{i}" for i in range(group_world_size)]
            )
            role_infos = [
                _RoleInstanceInfo.deserialize(info_bytes)
                for info_bytes in role_infos_bytes
            ]

            # Totals per role and overall, summed over every agent's workers.
            role_sizes = defaultdict(lambda: 0)
            global_size = 0
            for role_info in role_infos:
                role_sizes[role_info.role] += role_info.local_world_size
                global_size += role_info.local_world_size

            # Running prefix sums: base global rank overall and base rank
            # within each role, advanced agent by agent.
            base_global_rank = 0
            role_ranks = defaultdict(lambda: 0)

            keys = []
            values = []
            for i, role_info in enumerate(role_infos):
                keys.append(f"{ASSIGNED_RANKS_PREFIX}{i}")
                values.append(
                    json.dumps(
                        [
                            base_global_rank,
                            global_size,
                            role_ranks[role_info.role],
                            role_sizes[role_info.role],
                        ]
                    )
                )

                base_global_rank += role_info.local_world_size
                role_ranks[role_info.role] += role_info.local_world_size

            store.multi_set(keys, values)

        # get will block until the data is available in the store.
        (
            base_global_rank,
            global_world_size,
            base_role_rank,
            role_world_size,
        ) = json.loads(store.get(f"{ASSIGNED_RANKS_PREFIX}{group_rank}"))

        # Local workers occupy a contiguous rank range starting at the bases.
        workers = []
        for local_rank in range(spec.local_world_size):
            worker = Worker(
                local_rank=local_rank,
                global_rank=base_global_rank + local_rank,
                role_rank=base_role_rank + local_rank,
                world_size=global_world_size,
                role_world_size=role_world_size,
            )
            workers.append(worker)
        return workers

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _initialize_workers(self, worker_group: WorkerGroup) -> None:
        r"""Start a fresh set of workers for the worker_group.

        Essentially, a rendezvous followed by a ``start_workers``.
        The caller should first call ``_stop_workers()`` to stop running workers
        prior to calling this method.

        Optimistically sets the state of the worker group that
        just started as ``HEALTHY`` and delegates the actual monitoring
        of state to ``_monitor_workers()`` method
        """
        role = worker_group.spec.role
        logger.info("[%s] Rendezvous'ing worker group", role)

        # TODO after stopping workers, wait at least monitor_interval*2 for
        # workers on different nodes to fail on a collective op before waiting
        # on the rdzv barrier, this way we ensure that nodes enter rdzv
        # at around the same time and reduce false positive rdzv timeout errors
        self._rendezvous(worker_group)

        logger.info("[%s] Starting worker group", role)
        worker_ids = self._start_workers(worker_group)
        for local_rank, w_id in worker_ids.items():
            worker = worker_group.workers[local_rank]
            worker.id = w_id

        worker_group.state = WorkerState.HEALTHY

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _restart_workers(self, worker_group: WorkerGroup) -> None:
        """Restart (stops, rendezvous, starts) all local workers in the group."""
        role = worker_group.spec.role
        logger.info("[%s] Stopping worker group", role)
        self._stop_workers(worker_group, is_restart=True)
        worker_group.state = WorkerState.STOPPED
        self._initialize_workers(worker_group)

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def run(self, role: str = DEFAULT_ROLE) -> RunResult:
        start_time = time.monotonic()
        shutdown_called: bool = False
        try:
            result = self._invoke_run(role)
            self._total_execution_time = int(time.monotonic() - start_time)
            self._record_metrics(result)
            self._record_worker_events(result)
            return result
        except RendezvousGracefulExitError as e:
            # NOTE(review): on a graceful rendezvous exit this method falls
            # through and implicitly returns None despite the RunResult
            # annotation — callers should be prepared for that.
            logger.info("Rendezvous gracefully exited: %s", e)
        except SignalException as e:
            logger.warning("Received %s death signal, shutting down workers", e.sigval)
            self._shutdown(e.sigval)
            shutdown_called = True
            raise
        finally:
            if not shutdown_called:
                self._shutdown()
            # record the execution time in case there were any exceptions during run.
            self._total_execution_time = int(time.monotonic() - start_time)

    def get_event_failed(self) -> Event:
        # Build an agent-level FAILED event carrying the current traceback.
        return self._construct_event(
            state="FAILED",
            source=EventSource.AGENT,
            raw_error=traceback.format_exc(),
        )

    def get_event_succeeded(self) -> Event:
        # Build an agent-level SUCCEEDED event.
        return self._construct_event(
            state="SUCCEEDED",
            source=EventSource.AGENT,
        )

    def _record_worker_events(self, result: RunResult) -> None:
        # Emit one event per managed worker with its terminal state.
        for worker in self._worker_group.workers:
            failure = result.failures.get(worker.global_rank)
            state: str = self._get_worker_state(worker, result)
            raw_error = json.dumps(failure.error_file_data) if failure else None
            record(self._construct_event(state, EventSource.WORKER, worker, raw_error))

    def _get_worker_state(self, worker: Worker, result: RunResult) -> str:
        """Map a worker's outcome in ``result`` to an event state string."""
        failure = result.failures.get(worker.global_rank)
        if result.state in {WorkerState.UNHEALTHY, WorkerState.FAILED} and not failure:
            # The worker got terminated by the torchelastic agent via SIGTERM signal
            return "TERMINATED"
        elif failure or worker.global_rank in result.return_values:
            return result.state.value
        else:
            raise ValueError(f"Unknown worker: {worker.global_rank}")

    @contextmanager
    def record_duration(self, state: str):
        # Context manager that measures the wrapped scope and records its
        # duration (milliseconds) as an agent event, even on exceptions.
        start_time = time.perf_counter()
        try:
            yield
        finally:
            end_time = time.perf_counter()
            duration_ms = (end_time - start_time) * 1000
            record(
                self._construct_event(
                    state=state, source=EventSource.AGENT, duration_ms=duration_ms
                )
            )

    def _construct_event(
        self,
        state: str,
        source: EventSource,
        worker: Optional[Worker] = None,
        raw_error: Optional[str] = None,
        duration_ms: Optional[float] = None,
    ) -> Event:
        """Assemble an ``Event`` describing the agent or one of its workers."""
        wg = self._worker_group
        spec = wg.spec
        md = {
            "group_world_size": wg.group_world_size,
            "entry_point": spec.get_entrypoint_name(),
        }
        if worker:
            # NOTE(review): these metadata values are stored as 1-tuples, not
            # scalars (trailing commas below) — looks unintentional but is
            # preserved as-is; confirm with event consumers before changing.
            md["local_rank"] = (worker.local_rank,)
            md["role_rank"] = (worker.role_rank,)
            md["role_world_size"] = (worker.role_world_size,)
            global_rank = worker.global_rank
            worker_id = str(worker.id)
        else:
            global_rank = None
            worker_id = None
        md_str = json.dumps(md)
        metadata = {
            "run_id": spec.rdzv_handler.get_run_id(),
            "global_rank": global_rank,
            "group_rank": wg.group_rank,
            "worker_id": worker_id,
            "role": spec.role,
            "hostname": _get_fq_hostname(),
            "state": state,
            "total_run_time": self._total_execution_time,
            "rdzv_backend": spec.rdzv_handler.get_backend(),
            "raw_error": raw_error,
            "metadata": md_str,
            "agent_restarts": spec.max_restarts - self._remaining_restarts,
            "duration_ms": duration_ms,
        }
        return Event(
            f"torchelastic.worker.status.{state}", source=source, metadata=metadata
        )

    def _record_metrics(self, group_results: RunResult):
        # Emit run outcome counters split by success/failure x with/without retries.
        is_failed = group_results.is_failed()
        self._record_flakiness_metric(is_failed)
        spec = self._worker_group.spec
        restarts_happened = self._remaining_restarts != spec.max_restarts
        put_metric(f"workers.{spec.role}.run_total", 1)
        self._record_metric_with_condition(
            "run_success_with_retries", not is_failed and restarts_happened
        )
        self._record_metric_with_condition(
            "run_success_no_retries", not is_failed and not restarts_happened
        )
        self._record_metric_with_condition(
            "run_failed_with_retries", is_failed and restarts_happened
        )
        self._record_metric_with_condition(
            "run_failed_no_retries", is_failed and not restarts_happened
        )

    def _record_metric_with_condition(self, metric_name, condition):
        # Record 1 when the condition held for this run, 0 otherwise.
        spec = self._worker_group.spec
        if condition:
            put_metric(f"workers.{spec.role}.{metric_name}", 1)
        else:
            put_metric(f"workers.{spec.role}.{metric_name}", 0)

    def _record_flakiness_metric(self, is_failed: bool = False):
        # Flakiness in [0, 100]: 100 on failure, otherwise the fraction of
        # restart budget consumed.
        if is_failed:
            flakiness = 100.0
        else:
            spec = self._worker_group.spec
            flakiness = 100.0 - 100.0 * (self._remaining_restarts + 1) / (
                spec.max_restarts + 1
            )
        # NOTE(review): `spec` is re-assigned here so it is bound on the
        # failure path too (the else-branch assignment above is redundant
        # with this one).
        spec = self._worker_group.spec

        put_metric(f"workers.{spec.role}.flakiness", int(flakiness))

    def _invoke_run(self, role: str = DEFAULT_ROLE) -> RunResult:
        # NOTE: currently only works for a single role

        spec = self._worker_group.spec
        role = spec.role

        logger.info(
            "[%s] starting workers for entrypoint: %s", role, spec.get_entrypoint_name()
        )

        self._initialize_workers(self._worker_group)
        monitor_interval = spec.monitor_interval
        rdzv_handler = spec.rdzv_handler

        # Monitoring loop: poll worker state every monitor_interval seconds
        # and react (exit, restart, or fail) until a terminal state.
        while True:
            assert self._worker_group.state != WorkerState.INIT
            time.sleep(monitor_interval)
            run_result = self._monitor_workers(self._worker_group)
            state = run_result.state
            self._worker_group.state = state

            put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
            put_metric(f"workers.{role}.{state.name.lower()}", 1)

            if state == WorkerState.SUCCEEDED:
                logger.info(
                    "[%s] worker group successfully finished."
                    " Waiting %s seconds for other agents to finish.",
                    role,
                    self._exit_barrier_timeout,
                )
                self._exit_barrier()
                return run_result
            elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}:
                if self._remaining_restarts > 0:
                    logger.info(
                        "[%s] Worker group %s. "
                        "%s/%s attempts left;"
                        " will restart worker group",
                        role,
                        state.name,
                        self._remaining_restarts,
                        spec.max_restarts,
                    )
                    self._remaining_restarts -= 1
                    self._restart_workers(self._worker_group)
                else:
                    self._stop_workers(self._worker_group)
                    self._worker_group.state = WorkerState.FAILED
                    return run_result
            elif state == WorkerState.HEALTHY:
                # membership changes do not count as retries
                num_nodes_waiting = rdzv_handler.num_nodes_waiting()
                group_rank = self._worker_group.group_rank
                if num_nodes_waiting > 0:
                    logger.info(
                        "[%s] Detected %s "
                        "new nodes from group_rank=%s; "
                        "will restart worker group",
                        role,
                        num_nodes_waiting,
                        group_rank,
                    )
                    self._restart_workers(self._worker_group)
            else:
                raise Exception(  # noqa: TRY002
                    f"[{role}] Worker group in {state.name} state"
                )

    def _exit_barrier(self):
        """
        Define a barrier that keeps the agent process alive until all workers finish.

        Wait for ``exit_barrier_timeout`` seconds for all agents to finish
        executing their local workers (either successfully or not). This
        acts as a safety guard against user scripts that terminate at different
        times.
        """
        logger.info(
            "Local worker group finished (%s). "
            "Waiting %s seconds for other agents to finish",
            self._worker_group.state,
            self._exit_barrier_timeout,
        )
        start = time.time()
        try:
            store_util.barrier(
                store=self._store,
                world_size=self._worker_group.group_world_size,
                key_prefix=_TERMINAL_STATE_SYNC_ID,
                barrier_timeout=self._exit_barrier_timeout,
            )
            logger.info(
                "Done waiting for other agents. Elapsed: %s seconds",
                time.time() - start,
            )
        except SignalException as e:
            logger.warning("Got termination signal: %s", e.sigval)
            raise
        except Exception:
            # Barrier errors are logged but deliberately not re-raised: a
            # timed-out exit barrier should not fail an otherwise-complete run.
            logger.exception(
                "Error waiting on exit barrier. Elapsed: %s seconds",
                time.time() - start,
            )
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/health_check_server.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ from typing import Callable
10
+
11
+ from torch.distributed.elastic.utils.logging import get_logger
12
+
13
+
14
# Module-level logger for this file.
log = get_logger(__name__)

# Public API of this module.
__all__ = ["HealthCheckServer", "create_healthcheck_server"]
17
+
18
+
19
class HealthCheckServer:
    """Base interface for a health-check monitoring server.

    Subclasses may extend it by starting a tcp/http server on the specified
    port; this base implementation is a no-op.

    Args:

        alive_callback: Callable[[], int], callback to last progress time of agent

        port: int, port number to start tcp/http server

        timeout: int, timeout seconds to decide agent is alive/dead
    """

    _alive_callback: Callable[[], int]
    _port: int
    _timeout: int

    def __init__(
        self, alive_callback: Callable[[], int], port: int, timeout: int
    ) -> None:
        self._alive_callback = alive_callback
        self._port = port
        self._timeout = timeout

    def start(self) -> None:
        """No-op: stock PyTorch does not ship a health check server."""
        log.warning("No health check server started")

    def stop(self) -> None:
        """Log shutdown of the no-op health check server."""
        log.info("Stopping noop health check server.")
55
+
56
+
57
def create_healthcheck_server(
    alive_callback: Callable[[], int],
    port: int,
    timeout: int,
) -> HealthCheckServer:
    """
    Factory returning the (no-op) :class:`HealthCheckServer`.
    """
    server = HealthCheckServer(alive_callback, port, timeout)
    return server
vllm/lib/python3.10/site-packages/torch/distributed/elastic/agent/server/local_elastic_agent.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # mypy: allow-untyped-defs
3
+
4
+ # Copyright (c) Facebook, Inc. and its affiliates.
5
+ # All rights reserved.
6
+ #
7
+ # This source code is licensed under the BSD-style license found in the
8
+ # LICENSE file in the root directory of this source tree.
9
+
10
+
11
+ import json
12
+ import os
13
+ import signal
14
+ import socket
15
+ import time
16
+ import uuid
17
+ from string import Template
18
+ from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING
19
+
20
+ import torch.distributed.elastic.timer as timer
21
+ from torch.distributed.elastic import events
22
+ from torch.distributed.elastic.agent.server.api import (
23
+ RunResult,
24
+ SimpleElasticAgent,
25
+ WorkerGroup,
26
+ WorkerSpec,
27
+ WorkerState,
28
+ )
29
+ from torch.distributed.elastic.agent.server.health_check_server import (
30
+ create_healthcheck_server,
31
+ HealthCheckServer,
32
+ )
33
+ from torch.distributed.elastic.metrics.api import prof
34
+ from torch.distributed.elastic.multiprocessing import (
35
+ LogsSpecs,
36
+ PContext,
37
+ start_processes,
38
+ )
39
+ from torch.distributed.elastic.utils import macros
40
+ from torch.distributed.elastic.utils.logging import get_logger
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from torch.distributed.elastic.events.api import EventMetadataValue
45
+
46
+ logger = get_logger(__name__)
47
+
48
+ __all__ = [
49
+ "LocalElasticAgent",
50
+ "TORCHELASTIC_ENABLE_FILE_TIMER",
51
+ "TORCHELASTIC_TIMER_FILE",
52
+ "TORCHELASTIC_HEALTH_CHECK_PORT",
53
+ ]
54
+
55
+ TORCHELASTIC_ENABLE_FILE_TIMER = "TORCHELASTIC_ENABLE_FILE_TIMER"
56
+ TORCHELASTIC_HEALTH_CHECK_PORT = "TORCHELASTIC_HEALTH_CHECK_PORT"
57
+ TORCHELASTIC_TIMER_FILE = "TORCHELASTIC_TIMER_FILE"
58
+
59
+
60
class LocalElasticAgent(SimpleElasticAgent):
    """An implementation of :py:class:`torchelastic.agent.server.ElasticAgent` that handles host-local workers.

    This agent is deployed per host and is configured to spawn ``n`` workers.
    When using GPUs, ``n`` maps to the number of GPUs available on the host.

    The local agent does not communicate to other local agents deployed on
    other hosts, even if the workers may communicate inter-host. The worker id
    is interpreted to be a local process. The agent starts and stops all worker
    processes as a single unit.


    The worker function and argument passed to the worker function must be
    python multiprocessing compatible. To pass multiprocessing data structures
    to the workers you may create the data structure in the same multiprocessing
    context as the specified ``start_method`` and pass it as a function argument.

    The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
    for other agents to finish. This acts as a safety net to handle cases where
    workers finish at different times, to prevent agents from viewing workers
    that finished early as a scale-down event. It is strongly advised that the
    user code deal with ensuring that workers are terminated in a synchronous
    manner rather than relying on the exit_barrier_timeout.

    A named pipe based watchdog can be enabled in ```LocalElasticAgent``` if an
    environment variable ``TORCHELASTIC_ENABLE_FILE_TIMER`` with value 1 has
    been defined in the ```LocalElasticAgent``` process.
    Optionally, another environment variable ```TORCHELASTIC_TIMER_FILE```
    can be set with a unique file name for the named pipe. If the environment
    variable ```TORCHELASTIC_TIMER_FILE``` is not set, ```LocalElasticAgent```
    will internally create a unique file name and set it to the environment
    variable ```TORCHELASTIC_TIMER_FILE```, and this environment variable will
    be propagated to the worker processes to allow them to connect to the same
    named pipe that ```LocalElasticAgent``` uses.

    Logs are written to the specified log directory. Each log line will be by default
    prefixed by ``[${role_name}${local_rank}]:`` (e.g. ``[trainer0]: foobar``).
    Log prefixes can be customized by passing a `template string
    <https://docs.python.org/3/library/string.html#template-strings>`_ as the
    ``log_line_prefix_template`` argument.
    The following macros (identifiers) are substituted at runtime:
    ``${role_name}, ${local_rank}, ${rank}``. For example, to prefix each log line with
    global rank instead of the local rank, set ``log_line_prefix_template = "[${rank}]:``.


    Example launching function

    ::

        def trainer(args) -> str:
            return "do train"

        def main():
            start_method="spawn"
            shared_queue= multiprocessing.get_context(start_method).Queue()
            spec = WorkerSpec(
                        role="trainer",
                        local_world_size=nproc_per_process,
                        entrypoint=trainer,
                        args=("foobar",),
                        ...<OTHER_PARAMS...>)
            agent = LocalElasticAgent(spec, start_method)
            results = agent.run()

            if results.is_failed():
                print("trainer failed")
            else:
                print(f"rank 0 return value: {results.return_values[0]}")
                # prints -> rank 0 return value: do train

    Example launching binary

    ::

        def main():
            spec = WorkerSpec(
                        role="trainer",
                        local_world_size=nproc_per_process,
                        entrypoint="/usr/local/bin/trainer",
                        args=("--trainer-args", "foobar"),
                        ...<OTHER_PARAMS...>)
            agent = LocalElasticAgent(spec)
            results = agent.run()

            if not results.is_failed():
                print("binary launches do not have return values")

    """

    def __init__(
        self,
        spec: WorkerSpec,
        logs_specs: LogsSpecs,
        start_method="spawn",
        exit_barrier_timeout: float = 300,
        log_line_prefix_template: Optional[str] = None,
    ):
        super().__init__(spec, exit_barrier_timeout)
        self._start_method = start_method
        # Process context of the spawned workers; populated by _start_workers.
        self._pcontext: Optional[PContext] = None
        self._rdzv_handler = spec.rdzv_handler
        self._log_line_prefix_template = log_line_prefix_template
        # Optional named-pipe watchdog; created lazily in _setup_local_watchdog.
        self._worker_watchdog: Optional[timer.FileTimerServer] = None
        self._logs_specs = logs_specs
        # Optional HTTP liveness endpoint; created lazily in _setup_healthcheck.
        self._health_check_server: Optional[HealthCheckServer] = None

    def _setup_local_watchdog(self, envs: Dict[int, Dict[str, str]]) -> None:
        """Start a FileTimerServer when TORCHELASTIC_ENABLE_FILE_TIMER=1.

        Also propagates the timer file path to every worker's environment so
        workers can connect to the same named pipe.
        """
        enable_watchdog_env_name = TORCHELASTIC_ENABLE_FILE_TIMER
        watchdog_enabled = os.getenv(enable_watchdog_env_name)
        watchdog_file_env_name = TORCHELASTIC_TIMER_FILE
        watchdog_file_path = os.getenv(watchdog_file_env_name)
        if watchdog_enabled is not None and str(watchdog_enabled) == "1":
            if watchdog_file_path is None:
                # No file configured: generate a unique pipe path per agent run.
                watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
            logger.info("Starting a FileTimerServer with %s ...", watchdog_file_path)
            if not envs:
                logger.warning(
                    "Empty envs variables, using empty run_id for FileTimerServer"
                )
                run_id = ""
            else:
                # All workers share the same run id; read it from local rank 0.
                run_id = envs[0]["TORCHELASTIC_RUN_ID"]
            self._worker_watchdog = timer.FileTimerServer(
                file_path=watchdog_file_path,
                run_id=run_id,
                max_interval=0.1,
                daemon=True,
                log_event=self._log_watchdog_event,
            )
            self._worker_watchdog.start()
            logger.info("FileTimerServer started")
        else:
            logger.info(
                "Environment variable '%s' not found. Do not start FileTimerServer.",
                enable_watchdog_env_name,
            )
        # Propagate the watchdog file env to worker processes
        if watchdog_file_path is not None:
            for worker_env in envs.values():
                worker_env[watchdog_file_env_name] = watchdog_file_path

    @staticmethod
    def _get_current_time_secs() -> int:
        """Return the current wall-clock time as whole seconds since epoch."""
        return int(time.time())

    def _setup_healthcheck(self) -> None:
        """Start a health-check server when TORCHELASTIC_HEALTH_CHECK_PORT is set.

        The server reports liveness via an ``alive_callback``: the watchdog's
        last-progress time when available, otherwise the current time.
        """
        healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
        healthcheck_port = os.getenv(healthcheck_port_env_name)
        if healthcheck_port is not None:
            logger.info(
                "Found healthcheck port %s: %s",
                healthcheck_port_env_name,
                healthcheck_port,
            )
            if self._worker_watchdog is None:
                logger.info(
                    "FileTimerServer doesn't exist, using current time as dummy callback"
                )
                alive_callback = LocalElasticAgent._get_current_time_secs
            else:
                alive_callback = self._worker_watchdog.get_last_progress_time

            self._health_check_server = create_healthcheck_server(
                alive_callback=alive_callback,
                port=int(healthcheck_port),
                timeout=60,
            )
            self._health_check_server.start()
        else:
            logger.info(
                "Environment variable '%s' not found. Do not start health check.",
                healthcheck_port_env_name,
            )

    def _get_fq_hostname(self) -> str:
        """Return the fully-qualified hostname of this agent's host."""
        return socket.getfqdn(socket.gethostname())

    def _log_watchdog_event(
        self,
        name: str,
        request: Optional[timer.FileTimerRequest],
    ) -> None:
        """Record a watchdog event (with optional timer-request details) via the events API."""
        wg = self._worker_group
        spec = wg.spec
        md: Dict[str, str] = {"watchdog_event": name}
        if request is not None:
            md["worker_pid"] = str(request.worker_pid)
            md["scope_id"] = request.scope_id
            md["expiration_time"] = str(request.expiration_time)
            md["signal"] = str(request.signal)
        md_str = json.dumps(md)
        state = "RUNNING"
        metadata: Dict[str, EventMetadataValue] = {
            "run_id": spec.rdzv_handler.get_run_id(),
            "global_rank": None,
            "group_rank": wg.group_rank,
            "worker_id": None,
            "role": spec.role,
            "hostname": self._get_fq_hostname(),
            "state": state,
            "total_run_time": self._total_execution_time,
            "rdzv_backend": spec.rdzv_handler.get_backend(),
            "raw_error": None,
            "metadata": md_str,
            "agent_restarts": spec.max_restarts - self._remaining_restarts,
        }
        # Note: The 'metadata' field of the Event is converted to a TorchelasticStatusLogEntry later.
        # The 'name' field of the Event is NOT used in the TorchelasticStatusLogEntry.
        event = events.Event(
            name=name, source=events.EventSource.AGENT, metadata=metadata
        )
        events.record(event)

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _stop_workers(
        self, worker_group: WorkerGroup, is_restart: bool = False
    ) -> None:
        """Stop all local workers by shutting down the process context."""
        self._shutdown(is_restart=is_restart)

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
        """Launch one process per worker in ``worker_group``.

        Builds per-rank environment variables and arguments, starts the
        optional watchdog/health-check servers, then spawns the processes.

        Returns:
            Mapping of local rank to the spawned process id.
        """
        spec = worker_group.spec
        store = worker_group.store
        assert store is not None
        restart_count = spec.max_restarts - self._remaining_restarts

        use_agent_store: bool = spec.rdzv_handler.use_agent_store
        logger.info("use_agent_store: %s", use_agent_store)

        args: Dict[int, Tuple] = {}
        envs: Dict[int, Dict[str, str]] = {}
        log_line_prefixes: Optional[Dict[int, str]] = (
            {} if self._log_line_prefix_template else None
        )
        for worker in worker_group.workers:
            local_rank = worker.local_rank
            # Standard torchelastic environment contract exposed to each worker.
            worker_env = {
                "LOCAL_RANK": str(local_rank),
                "RANK": str(worker.global_rank),
                "GROUP_RANK": str(worker_group.group_rank),
                "ROLE_RANK": str(worker.role_rank),
                "ROLE_NAME": spec.role,
                "LOCAL_WORLD_SIZE": str(spec.local_world_size),
                "WORLD_SIZE": str(worker.world_size),
                "GROUP_WORLD_SIZE": str(worker_group.group_world_size),
                "ROLE_WORLD_SIZE": str(worker.role_world_size),
                "MASTER_ADDR": worker_group.master_addr,
                "MASTER_PORT": str(worker_group.master_port),
                "TORCHELASTIC_RESTART_COUNT": str(restart_count),
                "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
                "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
                "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
                "TORCH_NCCL_ASYNC_ERROR_HANDLING": os.getenv(
                    "TORCH_NCCL_ASYNC_ERROR_HANDLING", str(1)
                ),
            }
            if "OMP_NUM_THREADS" in os.environ:
                # Pass through thread-count override when set on the agent.
                worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]

            if self._log_line_prefix_template:
                # Expand ${role_name}/${rank}/${local_rank} macros per worker.
                log_line_prefix = Template(
                    self._log_line_prefix_template
                ).safe_substitute(
                    role_name=spec.role,
                    rank=worker.global_rank,
                    local_rank=local_rank,
                )
                log_line_prefixes[local_rank] = log_line_prefix

            envs[local_rank] = worker_env
            worker_args = list(spec.args)
            worker_args = macros.substitute(worker_args, str(local_rank))
            args[local_rank] = tuple(worker_args)

        self._setup_local_watchdog(envs=envs)
        self._setup_healthcheck()

        assert spec.entrypoint is not None
        assert self._logs_specs is not None
        self._pcontext = start_processes(
            name=spec.role,
            entrypoint=spec.entrypoint,
            args=args,
            envs=envs,
            logs_specs=self._logs_specs,
            log_line_prefixes=log_line_prefixes,
            start_method=self._start_method,
        )

        return self._pcontext.pids()

    def _shutdown(
        self, death_sig: signal.Signals = signal.SIGTERM, is_restart: bool = False
    ) -> None:
        """Tear down auxiliary servers, close worker processes, and (unless
        restarting) shut down the rendezvous handler.

        Args:
            death_sig: signal delivered to worker processes on close.
            is_restart: when True, keep the rendezvous handler alive for the
                next round of workers.
        """
        # Stop auxiliary servers before killing workers.
        if self._worker_watchdog is not None:
            self._worker_watchdog.stop()
            self._worker_watchdog = None
        if self._health_check_server is not None:
            self._health_check_server.stop()
            self._health_check_server = None
        if self._pcontext:
            self._pcontext.close(death_sig)
        if not is_restart and self._rdzv_handler:
            self._rdzv_handler.shutdown()

    # pyre-fixme[56]: Pyre was not able to infer the type of the decorator
    #  `torch.distributed.elastic.metrics.prof`.
    @prof
    def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
        """Poll worker processes and translate their status into a RunResult.

        Failures and return values are re-keyed from local rank to global rank.
        Returns UNKNOWN when the tracked pids diverge from the process context.
        """
        role = worker_group.spec.role
        worker_pids = {w.id for w in worker_group.workers}
        assert self._pcontext is not None
        pc_pids = set(self._pcontext.pids().values())
        if worker_pids != pc_pids:
            logger.error(
                "[%s] worker pids do not match process_context pids."
                " Expected: %s, actual: %s",
                role,
                worker_pids,
                pc_pids,
            )
            return RunResult(state=WorkerState.UNKNOWN)

        result = self._pcontext.wait(0)
        if result:
            if result.is_failed():
                # map local rank failure to global rank
                worker_failures = {}
                for local_rank, failure in result.failures.items():
                    worker = worker_group.workers[local_rank]
                    worker_failures[worker.global_rank] = failure
                return RunResult(
                    state=WorkerState.FAILED,
                    failures=worker_failures,
                )
            else:
                # copy ret_val_queue into a map with a global ranks
                workers_ret_vals = {}
                for local_rank, ret_val in result.return_values.items():
                    worker = worker_group.workers[local_rank]
                    workers_ret_vals[worker.global_rank] = ret_val
                return RunResult(
                    state=WorkerState.SUCCEEDED,
                    return_values=workers_ret_vals,
                )
        else:
            # No result yet: workers are still running.
            return RunResult(state=WorkerState.HEALTHY)
vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__init__.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env/python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+ Module contains events processing mechanisms that are integrated with the standard python logging.
11
+
12
+ Example of usage:
13
+
14
+ ::
15
+
16
+ from torch.distributed.elastic import events
17
+ event = events.Event(name="test_event", source=events.EventSource.WORKER, metadata={...})
18
+ events.get_logging_handler(destination="console").info(event)
19
+
20
+ """
21
+
22
+ import inspect
23
+ import logging
24
+ import os
25
+ import socket
26
+ import traceback
27
+ from typing import Dict, Optional
28
+
29
+ from torch.distributed.elastic.events.handlers import get_logging_handler
30
+
31
+ from .api import ( # noqa: F401
32
+ Event,
33
+ EventMetadataValue,
34
+ EventSource,
35
+ NodeState,
36
+ RdzvEvent,
37
+ )
38
+
39
+
40
+ _events_loggers: Dict[str, logging.Logger] = {}
41
+
42
+
43
def _get_or_create_logger(destination: str = "null") -> logging.Logger:
    """
    Return the cached events logger for ``destination``, creating it on first use.

    Available destinations are defined in the ``handlers`` module. The logger
    does not propagate to ancestor loggers (e.g. the root logger) so a single
    event is processed exactly once.

    Args:
        destination: The string representation of the event handler.
                     Available handlers found in ``handlers`` module
    """
    global _events_loggers

    cached = _events_loggers.get(destination)
    if cached is not None:
        return cached

    new_logger = logging.getLogger(f"torchelastic-events-{destination}")
    new_logger.setLevel(os.environ.get("LOGLEVEL", "INFO"))
    # Keep events out of upper-level loggers so each event is handled once.
    new_logger.propagate = False
    new_logger.addHandler(get_logging_handler(destination))

    # Cache for subsequent calls with the same destination.
    _events_loggers[destination] = new_logger
    return new_logger
70
+
71
+
72
def record(event: Event, destination: str = "null") -> None:
    """Serialize ``event`` and emit it through the logger for ``destination``."""
    _get_or_create_logger(destination).info(event.serialize())
74
+
75
+
76
def record_rdzv_event(event: RdzvEvent) -> None:
    """Serialize a rendezvous ``event`` and emit it to the dedicated
    ``dynamic_rendezvous`` destination."""
    _get_or_create_logger("dynamic_rendezvous").info(event.serialize())
78
+
79
+
80
def construct_and_record_rdzv_event(
    run_id: str,
    message: str,
    node_state: NodeState,
    name: str = "",
    hostname: str = "",
    pid: Optional[int] = None,
    master_endpoint: str = "",
    local_id: Optional[int] = None,
    rank: Optional[int] = None,
) -> None:
    """
    Initialize rendezvous event object and record its operations.

    Args:
        run_id (str): The run id of the rendezvous.
        message (str): The message describing the event.
        node_state (NodeState): The state of the node (INIT, RUNNING, SUCCEEDED, FAILED).
        name (str): Event name. (E.g. Current action being performed).
        hostname (str): Hostname of the node.
        pid (Optional[int]): The process id of the node.
        master_endpoint (str): The master endpoint for the rendezvous store, if known.
        local_id (Optional[int]): The local_id of the node, if defined in dynamic_rendezvous.py
        rank (Optional[int]): The rank of the node, if known.
    Returns:
        None
    Example:
        >>> # See DynamicRendezvousHandler class
        >>> def _record(
        ...     self,
        ...     message: str,
        ...     node_state: NodeState = NodeState.RUNNING,
        ...     rank: Optional[int] = None,
        ... ) -> None:
        ...     construct_and_record_rdzv_event(
        ...         name=f"{self.__class__.__name__}.{get_method_name()}",
        ...         run_id=self._settings.run_id,
        ...         message=message,
        ...         node_state=node_state,
        ...         hostname=self._this_node.addr,
        ...         pid=self._this_node.pid,
        ...         local_id=self._this_node.local_id,
        ...         rank=rank,
        ...     )
    """
    # We don't want to perform an extra computation if not needed.
    # (Stack inspection below is relatively expensive and the event would be
    # dropped by the NullHandler anyway.)
    if isinstance(get_logging_handler("dynamic_rendezvous"), logging.NullHandler):
        return

    # Set up parameters.
    if not hostname:
        hostname = socket.getfqdn()
    if not pid:
        pid = os.getpid()

    # Determines which file called this function.
    callstack = inspect.stack()
    filename = "no_file"
    if len(callstack) > 1:
        stack_depth_1 = callstack[1]
        filename = os.path.basename(stack_depth_1.filename)
        if not name:
            name = stack_depth_1.function

    # Delete the callstack variable. If kept, this can mess with python's
    # garbage collector as we are holding on to stack frame information in
    # the inspect module.
    del callstack

    # Set up error trace if this is an exception
    if node_state == NodeState.FAILED:
        error_trace = traceback.format_exc()
    else:
        error_trace = ""

    # Initialize event object.
    # Fix: use the computed caller ``filename`` in the event name; it was
    # previously computed but ignored in favor of a hard-coded placeholder.
    event = RdzvEvent(
        name=f"{filename}:{name}",
        run_id=run_id,
        message=message,
        hostname=hostname,
        pid=pid,
        node_state=node_state,
        master_endpoint=master_endpoint,
        rank=rank,
        local_id=local_id,
        error_trace=error_trace,
    )

    # Finally, record the event.
    record_rdzv_event(event)
vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.52 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/__pycache__/handlers.cpython-310.pyc ADDED
Binary file (573 Bytes). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/api.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # mypy: allow-untyped-defs
3
+
4
+ # Copyright (c) Facebook, Inc. and its affiliates.
5
+ # All rights reserved.
6
+ #
7
+ # This source code is licensed under the BSD-style license found in the
8
+ # LICENSE file in the root directory of this source tree.
9
+
10
+ import json
11
+ from dataclasses import asdict, dataclass, field
12
+ from enum import Enum
13
+ from typing import Dict, Optional, Union
14
+
15
+
16
+ __all__ = ["EventSource", "Event", "NodeState", "RdzvEvent"]
17
+
18
+ EventMetadataValue = Union[str, int, float, bool, None]
19
+
20
+
21
class EventSource(str, Enum):
    """Known identifiers of the event producers."""

    AGENT = "AGENT"  # event emitted by the elastic agent process
    WORKER = "WORKER"  # event emitted by a worker process
+
27
+
28
@dataclass
class Event:
    """
    The class represents the generic event that occurs during the torchelastic job execution.

    The event can be any kind of meaningful action.

    Args:
        name: event name.
        source: the event producer, e.g. agent or worker
        timestamp: timestamp in milliseconds when event occurred.
        metadata: additional data that is associated with the event.
    """

    name: str
    source: EventSource
    timestamp: int = 0
    metadata: Dict[str, EventMetadataValue] = field(default_factory=dict)

    def serialize(self) -> str:
        """Encode this event as a JSON object string."""
        return json.dumps(asdict(self))

    def __str__(self):
        return self.serialize()

    @staticmethod
    def deserialize(data: Union[str, "Event"]) -> "Event":
        """Decode an event from its JSON form; an ``Event`` instance passes through unchanged."""
        if isinstance(data, Event):
            return data
        if isinstance(data, str):
            payload = json.loads(data)
        # Restore the enum member from its serialized name.
        payload["source"] = EventSource[payload["source"]]  # type: ignore[possibly-undefined]
        return Event(**payload)
61
+
62
+
63
class NodeState(str, Enum):
    """The states that a node can be in rendezvous."""

    INIT = "INIT"  # node created but not yet participating
    RUNNING = "RUNNING"  # node actively participating in rendezvous
    SUCCEEDED = "SUCCEEDED"  # node completed successfully
    FAILED = "FAILED"  # node terminated with an error
70
+
71
+
72
@dataclass
class RdzvEvent:
    """
    Dataclass to represent any rendezvous event.

    Args:
        name: Event name. (E.g. Current action being performed)
        run_id: The run id of the rendezvous
        message: The message describing the event
        hostname: Hostname of the node
        pid: The process id of the node
        node_state: The state of the node (INIT, RUNNING, SUCCEEDED, FAILED)
        master_endpoint: The master endpoint for the rendezvous store, if known
        rank: The rank of the node, if known
        local_id: The local_id of the node, if defined in dynamic_rendezvous.py
        error_trace: Error stack trace, if this is an error event.
    """

    name: str
    run_id: str
    message: str
    hostname: str
    pid: int
    node_state: NodeState
    master_endpoint: str = ""
    rank: Optional[int] = None
    local_id: Optional[int] = None
    error_trace: str = ""

    def __str__(self):
        return self.serialize()

    @staticmethod
    def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent":
        """Decode an event from its JSON form; an ``RdzvEvent`` instance passes through unchanged."""
        if isinstance(data, RdzvEvent):
            return data
        if isinstance(data, str):
            data_dict = json.loads(data)
        # Restore the enum member from its serialized name.
        data_dict["node_state"] = NodeState[data_dict["node_state"]]  # type: ignore[possibly-undefined]
        return RdzvEvent(**data_dict)

    def serialize(self) -> str:
        """Encode this event as a JSON object string."""
        return json.dumps(asdict(self))
+ return json.dumps(asdict(self))
vllm/lib/python3.10/site-packages/torch/distributed/elastic/events/handlers.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ import logging
10
+ from typing import Dict
11
+
12
+
13
+ _log_handlers: Dict[str, logging.Handler] = {
14
+ "console": logging.StreamHandler(),
15
+ "dynamic_rendezvous": logging.NullHandler(),
16
+ "null": logging.NullHandler(),
17
+ }
18
+
19
+
20
+ def get_logging_handler(destination: str = "null") -> logging.Handler:
21
+ global _log_handlers
22
+ return _log_handlers[destination]
vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__init__.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env/python3
2
+ # mypy: allow-untyped-defs
3
+
4
+ # Copyright (c) Facebook, Inc. and its affiliates.
5
+ # All rights reserved.
6
+ #
7
+ # This source code is licensed under the BSD-style license found in the
8
+ # LICENSE file in the root directory of this source tree.
9
+
10
+ """Metrics API.
11
+
12
+ **Overview**:
13
+
14
+ The metrics API in torchelastic is used to publish telemetry metrics.
15
+ It is designed to be used by torchelastic's internal modules to
16
+ publish metrics for the end user with the goal of increasing visibility
17
+ and helping with debugging. However you may use the same API in your
18
+ jobs to publish metrics to the same metrics ``sink``.
19
+
20
+ A ``metric`` can be thought of as timeseries data
21
+ and is uniquely identified by the string-valued tuple
22
+ ``(metric_group, metric_name)``.
23
+
24
+ torchelastic makes no assumptions about what a ``metric_group`` is
25
+ and what relationship it has with ``metric_name``. It is totally up
26
+ to the user to use these two fields to uniquely identify a metric.
27
+
28
+ .. note:: The metric group ``torchelastic`` is reserved by torchelastic for
29
+ platform level metrics that it produces.
30
+ For instance torchelastic may output the latency (in milliseconds)
31
+ of a re-rendezvous operation from the agent as
32
+ ``(torchelastic, agent.rendezvous.duration.ms)``
33
+
34
+ A sensible way to use metric groups is to map them to a stage or module
35
+ in your job. You may also encode certain high level properties
36
+ the job such as the region or stage (dev vs prod).
37
+
38
+ **Publish Metrics**:
39
+
40
+ Using torchelastic's metrics API is similar to using python's logging
41
+ framework. You first have to configure a metrics handler before
42
+ trying to add metric data.
43
+
44
+ The example below measures the latency for the ``calculate()`` function.
45
+
46
+ ::
47
+
48
+ import time
49
+ import torch.distributed.elastic.metrics as metrics
50
+
51
+ # makes all metrics other than the one from "my_module" to go /dev/null
52
+ metrics.configure(metrics.NullMetricsHandler())
53
+ metrics.configure(metrics.ConsoleMetricsHandler(), "my_module")
54
+
55
+ def my_method():
56
+ start = time.time()
57
+ calculate()
58
+ end = time.time()
59
+ metrics.put_metric("calculate_latency", int(end-start), "my_module")
60
+
61
+ You may also use the torch.distributed.elastic.metrics.prof` decorator
62
+ to conveniently and succinctly profile functions
63
+
64
+ ::
65
+
66
+ # -- in module examples.foobar --
67
+
68
+ import torch.distributed.elastic.metrics as metrics
69
+
70
+ metrics.configure(metrics.ConsoleMetricsHandler(), "foobar")
71
+ metrics.configure(metrics.ConsoleMetricsHandler(), "Bar")
72
+
73
+ @metrics.prof
74
+ def foo():
75
+ pass
76
+
77
+ class Bar():
78
+
79
+ @metrics.prof
80
+ def baz():
81
+ pass
82
+
83
+ ``@metrics.prof`` will publish the following metrics
84
+ ::
85
+
86
+ <leaf_module or classname>.success - 1 if the function finished successfully
87
+ <leaf_module or classname>.failure - 1 if the function threw an exception
88
+ <leaf_module or classname>.duration.ms - function duration in milliseconds
89
+
90
+ **Configuring Metrics Handler**:
91
+
92
+ `torch.distributed.elastic.metrics.MetricHandler` is responsible for emitting
93
+ the added metric values to a particular destination. Metric groups can be
94
+ configured with different metric handlers.
95
+
96
+ By default torchelastic emits all metrics to ``/dev/null``.
97
+ By adding the following configuration metrics,
98
+ ``torchelastic`` and ``my_app`` metric groups will be printed out to
99
+ console.
100
+
101
+ ::
102
+
103
+ import torch.distributed.elastic.metrics as metrics
104
+
105
+ metrics.configure(metrics.ConsoleMetricHandler(), group = "torchelastic")
106
+ metrics.configure(metrics.ConsoleMetricHandler(), group = "my_app")
107
+
108
+ **Writing a Custom Metric Handler**:
109
+
110
+ If you want your metrics to be emitted to a custom location, implement
111
+ the `torch.distributed.elastic.metrics.MetricHandler` interface
112
+ and configure your job to use your custom metric handler.
113
+
114
+ Below is a toy example that prints the metrics to ``stdout``
115
+
116
+ ::
117
+
118
+ import torch.distributed.elastic.metrics as metrics
119
+
120
+ class StdoutMetricHandler(metrics.MetricHandler):
121
+ def emit(self, metric_data):
122
+ ts = metric_data.timestamp
123
+ group = metric_data.group_name
124
+ name = metric_data.name
125
+ value = metric_data.value
126
+ print(f"[{ts}][{group}]: {name}={value}")
127
+
128
+ metrics.configure(StdoutMetricHandler(), group="my_app")
129
+
130
+ Now all metrics in the group ``my_app`` will be printed to stdout as:
131
+
132
+ ::
133
+
134
+ [1574213883.4182858][my_app]: my_metric=<value>
135
+ [1574213940.5237644][my_app]: my_metric=<value>
136
+
137
+ """
138
+
139
+ from typing import Optional
140
+
141
+ from .api import ( # noqa: F401
142
+ configure,
143
+ ConsoleMetricHandler,
144
+ get_elapsed_time_ms,
145
+ getStream,
146
+ MetricData,
147
+ MetricHandler,
148
+ MetricsConfig,
149
+ NullMetricHandler,
150
+ prof,
151
+ profile,
152
+ publish_metric,
153
+ put_metric,
154
+ )
155
+
156
+
157
+ def initialize_metrics(cfg: Optional[MetricsConfig] = None):
158
+ pass
159
+
160
+
161
+ try:
162
+ from torch.distributed.elastic.metrics.static_init import * # type: ignore[import] # noqa: F401 F403
163
+ except ModuleNotFoundError:
164
+ pass
vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.91 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/__pycache__/api.cpython-310.pyc ADDED
Binary file (5.98 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/metrics/api.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # mypy: allow-untyped-defs
3
+
4
+ # Copyright (c) Facebook, Inc. and its affiliates.
5
+ # All rights reserved.
6
+ #
7
+ # This source code is licensed under the BSD-style license found in the
8
+ # LICENSE file in the root directory of this source tree.
9
+
10
+ import abc
11
+ import time
12
+ from collections import namedtuple
13
+ from functools import wraps
14
+ from typing import Dict, Optional
15
+ from typing_extensions import deprecated
16
+
17
+
18
+ __all__ = [
19
+ "MetricsConfig",
20
+ "MetricHandler",
21
+ "ConsoleMetricHandler",
22
+ "NullMetricHandler",
23
+ "MetricStream",
24
+ "configure",
25
+ "getStream",
26
+ "prof",
27
+ "profile",
28
+ "put_metric",
29
+ "publish_metric",
30
+ "get_elapsed_time_ms",
31
+ "MetricData",
32
+ ]
33
+
34
+ MetricData = namedtuple("MetricData", ["timestamp", "group_name", "name", "value"])
35
+
36
+
37
+ class MetricsConfig:
38
+ __slots__ = ["params"]
39
+
40
+ def __init__(self, params: Optional[Dict[str, str]] = None):
41
+ self.params = params
42
+ if self.params is None:
43
+ self.params = {}
44
+
45
+
46
+ class MetricHandler(abc.ABC):
47
+ @abc.abstractmethod
48
+ def emit(self, metric_data: MetricData):
49
+ pass
50
+
51
+
52
+ class ConsoleMetricHandler(MetricHandler):
53
+ def emit(self, metric_data: MetricData):
54
+ print(
55
+ f"[{metric_data.timestamp}][{metric_data.group_name}]: {metric_data.name}={metric_data.value}"
56
+ )
57
+
58
+
59
+ class NullMetricHandler(MetricHandler):
60
+ def emit(self, metric_data: MetricData):
61
+ pass
62
+
63
+
64
+ class MetricStream:
65
+ def __init__(self, group_name: str, handler: MetricHandler):
66
+ self.group_name = group_name
67
+ self.handler = handler
68
+
69
+ def add_value(self, metric_name: str, metric_value: int):
70
+ self.handler.emit(
71
+ MetricData(time.time(), self.group_name, metric_name, metric_value)
72
+ )
73
+
74
+
75
+ _metrics_map: Dict[str, MetricHandler] = {}
76
+ _default_metrics_handler: MetricHandler = NullMetricHandler()
77
+
78
+
79
+ # pyre-fixme[9]: group has type `str`; used as `None`.
80
+ def configure(handler: MetricHandler, group: Optional[str] = None):
81
+ if group is None:
82
+ global _default_metrics_handler
83
+ # pyre-fixme[9]: _default_metrics_handler has type `NullMetricHandler`; used
84
+ # as `MetricHandler`.
85
+ _default_metrics_handler = handler
86
+ else:
87
+ _metrics_map[group] = handler
88
+
89
+
90
+ def getStream(group: str):
91
+ if group in _metrics_map:
92
+ handler = _metrics_map[group]
93
+ else:
94
+ handler = _default_metrics_handler
95
+ return MetricStream(group, handler)
96
+
97
+
98
+ def _get_metric_name(fn):
99
+ qualname = fn.__qualname__
100
+ split = qualname.split(".")
101
+ if len(split) == 1:
102
+ module = fn.__module__
103
+ if module:
104
+ return module.split(".")[-1] + "." + split[0]
105
+ else:
106
+ return split[0]
107
+ else:
108
+ return qualname
109
+
110
+
111
+ def prof(fn=None, group: str = "torchelastic"):
112
+ r"""
113
+ @profile decorator publishes duration.ms, count, success, failure metrics for the function that it decorates.
114
+
115
+ The metric name defaults to the qualified name (``class_name.def_name``) of the function.
116
+ If the function does not belong to a class, it uses the leaf module name instead.
117
+
118
+ Usage
119
+
120
+ ::
121
+
122
+ @metrics.prof
123
+ def x():
124
+ pass
125
+
126
+ @metrics.prof(group="agent")
127
+ def y():
128
+ pass
129
+ """
130
+
131
+ def wrap(f):
132
+ @wraps(f)
133
+ def wrapper(*args, **kwargs):
134
+ key = _get_metric_name(f)
135
+ try:
136
+ start = time.time()
137
+ result = f(*args, **kwargs)
138
+ put_metric(f"{key}.success", 1, group)
139
+ except Exception:
140
+ put_metric(f"{key}.failure", 1, group)
141
+ raise
142
+ finally:
143
+ put_metric(f"{key}.duration.ms", get_elapsed_time_ms(start), group) # type: ignore[possibly-undefined]
144
+ return result
145
+
146
+ return wrapper
147
+
148
+ if fn:
149
+ return wrap(fn)
150
+ else:
151
+ return wrap
152
+
153
+
154
+ @deprecated("Deprecated, use `@prof` instead", category=FutureWarning)
155
+ def profile(group=None):
156
+ """
157
+ @profile decorator adds latency and success/failure metrics to any given function.
158
+
159
+ Usage
160
+
161
+ ::
162
+
163
+ @metrics.profile("my_metric_group")
164
+ def some_function(<arguments>):
165
+ """
166
+
167
+ def wrap(func):
168
+ @wraps(func)
169
+ def wrapper(*args, **kwargs):
170
+ try:
171
+ start_time = time.time()
172
+ result = func(*args, **kwargs)
173
+ publish_metric(group, f"{func.__name__}.success", 1)
174
+ except Exception:
175
+ publish_metric(group, f"{func.__name__}.failure", 1)
176
+ raise
177
+ finally:
178
+ publish_metric(
179
+ group,
180
+ f"{func.__name__}.duration.ms",
181
+ get_elapsed_time_ms(start_time), # type: ignore[possibly-undefined]
182
+ )
183
+ return result
184
+
185
+ return wrapper
186
+
187
+ return wrap
188
+
189
+
190
+ def put_metric(metric_name: str, metric_value: int, metric_group: str = "torchelastic"):
191
+ """
192
+ Publish a metric data point.
193
+
194
+ Usage
195
+
196
+ ::
197
+
198
+ put_metric("metric_name", 1)
199
+ put_metric("metric_name", 1, "metric_group_name")
200
+ """
201
+ getStream(metric_group).add_value(metric_name, metric_value)
202
+
203
+
204
+ @deprecated(
205
+ "Deprecated, use `put_metric(metric_group)(metric_name, metric_value)` instead",
206
+ category=FutureWarning,
207
+ )
208
+ def publish_metric(metric_group: str, metric_name: str, metric_value: int):
209
+ metric_stream = getStream(metric_group)
210
+ metric_stream.add_value(metric_name, metric_value)
211
+
212
+
213
+ def get_elapsed_time_ms(start_time_in_seconds: float):
214
+ """Return the elapsed time in millis from the given start time."""
215
+ end_time = time.time()
216
+ return int((end_time - start_time_in_seconds) * 1000)
vllm/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.92 kB). View file
 
vllm/lib/python3.10/site-packages/torch/distributed/elastic/rendezvous/__init__.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ In the context of Torch Distributed Elastic we use the term *rendezvous* to
9
+ refer to a particular functionality that combines a **distributed
10
+ synchronization** primitive with **peer discovery**.
11
+
12
+ It is used by Torch Distributed Elastic to gather participants of a training
13
+ job (i.e. nodes) such that they all agree on the same list of participants and
14
+ everyone's roles, as well as make a consistent collective decision on when
15
+ training can begin/resume.
16
+
17
+ Torch Distributed Elastic rendezvous provides the following critical
18
+ functionalities:
19
+
20
+ **Barrier**:
21
+
22
+ Nodes performing rendezvous will all block until the rendezvous is considered
23
+ complete - this happens when at least ``min`` total number of nodes have joined
24
+ the rendezvous barrier (for the same job). This also implies the barrier is not
25
+ necessarily of fixed size.
26
+
27
+ There's an additional small waiting time after reaching ``min`` number of
28
+ nodes - this is used to ensure the rendezvous is not completed "too quickly"
29
+ (which could potentially exclude additional nodes attempting to join at
30
+ approximately the same time).
31
+
32
+ If ``max`` number of nodes is gathered at the barrier, the rendezvous is
33
+ completed immediately.
34
+
35
+ There's also an overall timeout which causes the rendezvous to fail if ``min``
36
+ number of nodes is never reached - this is meant to be a simple fail-safe to
37
+ help release partially allocated job resources, in case there's a problem with
38
+ the resource manager, and is meant to be interpreted as non-retryable.
39
+
40
+ **Exclusivity**:
41
+
42
+ A simple distributed barrier would not be sufficient, as we also need to ensure
43
+ that only one group of nodes exists at any given time (for a given job). In
44
+ other words, new nodes (i.e. joining late) should not be able to form a parallel
45
+ independent group of workers for the same job.
46
+
47
+ Torch Distributed Elastic rendezvous ensures that if a group of nodes has
48
+ already completed a rendezvous (and hence might already be training), then
49
+ additional "late" nodes attempting to rendezvous will only announce themselves
50
+ as waiting, and will have to wait until the (previously completed) existing
51
+ rendezvous is destroyed first.
52
+
53
+ **Consistency**:
54
+
55
+ When a rendezvous is completed, all its members will agree on the job membership
56
+ and everyone's role in it. This role is represented using an integer, called
57
+ rank, that is between between 0 and world size.
58
+
59
+ Note that ranks are *not stable*, in the sense that the same node can be
60
+ assigned a different rank in the next (re-)rendezvous.
61
+
62
+ **Fault-tolerance**:
63
+
64
+ Torch Distributed Elastic rendezvous is designed to tolerate node failures
65
+ during the rendezvous process. Should a process crash (or lose network
66
+ connectivity, etc), between joining the rendezvous and it being completed, then
67
+ a re-rendezvous with remaining healthy nodes will happen automatically.
68
+
69
+ A node can also fail *after* it has completed (or *has been observered* by other
70
+ nodes to have completed) the rendezvous - this scenario will be handled by the
71
+ Torch Distributed Elastic ``train_loop`` instead (where it will also trigger a
72
+ re-rendezvous).
73
+
74
+ **Shared key-value store**:
75
+
76
+ When the rendezvous is completed, a shared key-value store is created and
77
+ returned. This store implements a ``torch.distributed.Store`` API (see
78
+ `distributed communication docs
79
+ <https://pytorch.org/docs/stable/distributed.html>`__).
80
+
81
+ This store is only shared by the members of the completed rendezvous. It
82
+ is intended to be used by Torch Distributed Elastic to exchange information
83
+ necessary to initialize job control and data-planes.
84
+
85
+ **Waiting workers and rendezvous closing**:
86
+
87
+ Torch Distributed Elastic rendezvous handler object provides additional
88
+ functionalities, which are technically not part of the rendezvous process:
89
+
90
+ 1. Querying how many workers arrived late at the barrier, who can participate in
91
+ *next* rendezvous.
92
+
93
+ 2. Setting the rendezvous *closed* to signal all nodes not to participate in
94
+ next rendezvous.
95
+
96
+ **DynamicRendezvousHandler**:
97
+
98
+ Torch Distributed Elastic comes with the :py:class:`.DynamicRendezvousHandler`
99
+ class that implements the rendezvous mechanism described above. It is a backend-
100
+ agnostic type that expects a particular :py:class:`.RendezvousBackend` instance
101
+ to be specified during construction.
102
+
103
+ Torch distributed users can either implement their own backend type or use one
104
+ of the following implementations that come with PyTorch:
105
+
106
+ - :py:class:`.C10dRendezvousBackend`: Uses a C10d store (by default
107
+ ``TCPStore``) as the rendezvous backend. The main advantage of using a C10d
108
+ store is that it requires no 3rd-party dependency (such as etcd) to establish
109
+ a rendezvous.
110
+ - :py:class:`.EtcdRendezvousBackend`: Supersedes the legacy
111
+ :py:class:`.EtcdRendezvousHandler` class. Passing an
112
+ :py:class:`.EtcdRendezvousBackend` instance to
113
+ :py:class:`.DynamicRendezvousHandler` is functionally equivalent to
114
+ instantiating an :py:class:`.EtcdRendezvousHandler`.
115
+
116
+ ::
117
+
118
+ store = TCPStore("localhost")
119
+
120
+ backend = C10dRendezvousBackend(store, "my_run_id")
121
+
122
+ rdzv_handler = DynamicRendezvousHandler.from_backend(
123
+ run_id="my_run_id",
124
+ store=store,
125
+ backend=backend,
126
+ min_nodes=2,
127
+ max_nodes=4
128
+ )
129
+ """
130
+
131
+ from .api import (
132
+ rendezvous_handler_registry,
133
+ RendezvousClosedError,
134
+ RendezvousConnectionError,
135
+ RendezvousError,
136
+ RendezvousGracefulExitError,
137
+ RendezvousHandler,
138
+ RendezvousHandlerCreator,
139
+ RendezvousHandlerRegistry,
140
+ RendezvousInfo,
141
+ RendezvousParameters,
142
+ RendezvousStateError,
143
+ RendezvousStoreInfo,
144
+ RendezvousTimeoutError,
145
+ )
146
+ from .registry import _register_default_handlers
147
+
148
+
149
+ _register_default_handlers()
150
+
151
+
152
+ __all__ = [
153
+ "RendezvousClosedError",
154
+ "RendezvousConnectionError",
155
+ "RendezvousError",
156
+ "RendezvousGracefulExitError",
157
+ "RendezvousHandler",
158
+ "RendezvousHandlerCreator",
159
+ "RendezvousHandlerRegistry",
160
+ "RendezvousInfo",
161
+ "RendezvousParameters",
162
+ "RendezvousStateError",
163
+ "RendezvousStoreInfo",
164
+ "RendezvousTimeoutError",
165
+ "rendezvous_handler_registry",
166
+ ]