diff --git a/smoe_vcr/checkpoint-13312/latest b/smoe_vcr/checkpoint-13312/latest
deleted file mode 100644
index 41750eb6d66126b6023d0560fd5c7875c0706774..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/latest
+++ /dev/null
@@ -1 +0,0 @@
-global_step13312
\ No newline at end of file
diff --git a/smoe_vcr/checkpoint-13312/model-00001-of-00002.safetensors b/smoe_vcr/checkpoint-13312/model-00001-of-00002.safetensors
deleted file mode 100644
index 5ee6759e18ffe2324661938bdd61453415e2bfe2..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/model-00001-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a1b08e9099f94c3d1a61dc176cb6ebace68cce03e5fb193dd2134237b6ec090d
-size 4972489328
diff --git a/smoe_vcr/checkpoint-13312/model-00002-of-00002.safetensors b/smoe_vcr/checkpoint-13312/model-00002-of-00002.safetensors
deleted file mode 100644
index da06ad886c0ac72413c4c0b097c0eeb6db12dffc..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/model-00002-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:29d557b44abd6c334cc7711d303922022efcba465e24bd2c199779f8582b4485
-size 3759015720
diff --git a/smoe_vcr/checkpoint-13312/rng_state_0.pth b/smoe_vcr/checkpoint-13312/rng_state_0.pth
deleted file mode 100644
index 9231f69f5fd461899867106a669ce247e70c72c2..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/rng_state_0.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f9f23d807f0e704f4ca79670a6631cbff43189cf7f8ff4e1fc0a4330e636a798
-size 14960
diff --git a/smoe_vcr/checkpoint-13312/rng_state_1.pth b/smoe_vcr/checkpoint-13312/rng_state_1.pth
deleted file mode 100644
index 19fe2dcc766f192ea5de79cec4dcff17172a10f7..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/rng_state_1.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1d37f92f6aea5386e84d2d64a1a25d6ef96a10b3bbbfe63627981604c8934076
-size 14960
diff --git a/smoe_vcr/checkpoint-13312/rng_state_2.pth b/smoe_vcr/checkpoint-13312/rng_state_2.pth
deleted file mode 100644
index bfe492519c6b79b07a8d68b98c5f3d0c073667aa..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/rng_state_2.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:667ebf727735115f00a6bdbe090344e9846c726d11bb555cdc201c415f27ad85
-size 14960
diff --git a/smoe_vcr/checkpoint-13312/rng_state_3.pth b/smoe_vcr/checkpoint-13312/rng_state_3.pth
deleted file mode 100644
index 838d42ad13e30851fdbd1d8801738a4106a9ce8b..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/rng_state_3.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:49d306f8c511cba8a225e3b723c5fa79d8a6ecc922f834da914ff0780c78b1fc
-size 14960
diff --git a/smoe_vcr/checkpoint-13312/tokenizer.model b/smoe_vcr/checkpoint-13312/tokenizer.model
deleted file mode 100644
index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/tokenizer.model
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
diff --git a/smoe_vcr/checkpoint-13312/training_args.bin b/smoe_vcr/checkpoint-13312/training_args.bin
deleted file mode 100644
index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/training_args.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe
-size 7992
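Everything deleted above is a Git LFS pointer stub, not the binary payload itself: each file holds exactly three fields (spec version, a sha256 object id, and the payload size in bytes), so e.g. the first safetensors shard weighs 4972489328 bytes, roughly 4.6 GiB. A minimal sketch of decoding such a pointer; parse_lfs_pointer is a hypothetical helper name, not part of git or git-lfs:

    # Hypothetical helper: decode the three-field Git LFS pointer format
    # ("version ...", "oid sha256:<hex>", "size <bytes>") shown in the hunks above.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path) as fh:
            for line in fh:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields["oid"].removeprefix("sha256:"), int(fields["size"])

    # oid, size = parse_lfs_pointer("model-00001-of-00002.safetensors")
    # -> ("a1b08e9099f94c3d...", 4972489328)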
diff --git a/smoe_vcr/checkpoint-13312/zero_to_fp32.py b/smoe_vcr/checkpoint-13312/zero_to_fp32.py
deleted file mode 100644
index 24cc342e78d1a006c782b3a4cd68d9ce786d8fd8..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-13312/zero_to_fp32.py
+++ /dev/null
@@ -1,604 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) Microsoft Corporation.
-# SPDX-License-Identifier: Apache-2.0
-
-# DeepSpeed Team
-
-# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
-# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
-# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
-# application.
-#
-# example: python zero_to_fp32.py . pytorch_model.bin
-
-import argparse
-import torch
-import glob
-import math
-import os
-import re
-from collections import OrderedDict
-from dataclasses import dataclass
-
-# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
-# DeepSpeed data structures it has to be available in the current python environment.
-from deepspeed.utils import logger
-from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
-                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
-                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
-
-
-@dataclass
-class zero_model_state:
-    buffers: dict()
-    param_shapes: dict()
-    shared_params: list
-    ds_version: int
-    frozen_param_shapes: dict()
-    frozen_param_fragments: dict()
-
-
-debug = 0
-
-# load to cpu
-device = torch.device('cpu')
-
-
-def atoi(text):
-    return int(text) if text.isdigit() else text
-
-
-def natural_keys(text):
-    '''
-    alist.sort(key=natural_keys) sorts in human order
-    http://nedbatchelder.com/blog/200712/human_sorting.html
-    (See Toothy's implementation in the comments)
-    '''
-    return [atoi(c) for c in re.split(r'(\d+)', text)]
-
-
-def get_model_state_file(checkpoint_dir, zero_stage):
-    if not os.path.isdir(checkpoint_dir):
-        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
-
-    # there should be only one file
-    if zero_stage <= 2:
-        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
-    elif zero_stage == 3:
-        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
-
-    if not os.path.exists(file):
-        raise FileNotFoundError(f"can't find model states file at '{file}'")
-
-    return file
-
-
-def get_checkpoint_files(checkpoint_dir, glob_pattern):
-    # XXX: need to test that this simple glob rule works for multi-node setup too
-    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
-
-    if len(ckpt_files) == 0:
-        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
-
-    return ckpt_files
-
-
-def get_optim_files(checkpoint_dir):
-    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
-
-
-def get_model_state_files(checkpoint_dir):
-    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
-
-
-def parse_model_states(files):
-    zero_model_states = []
-    for file in files:
-        state_dict = torch.load(file, map_location=device)
-
-        if BUFFER_NAMES not in state_dict:
-            raise ValueError(f"{file} is not a model state checkpoint")
-        buffer_names = state_dict[BUFFER_NAMES]
-        if debug:
-            print("Found buffers:", buffer_names)
-
-        # recover just the buffers while restoring them to fp32 if they were saved in fp16
-        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
-        param_shapes = state_dict[PARAM_SHAPES]
-
-        # collect parameters that are included in param_shapes
-        param_names = []
-        for s in param_shapes:
-            for name in s.keys():
-                param_names.append(name)
-
-        # update with frozen parameters
-        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
-        if frozen_param_shapes is not None:
-            if debug:
-                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
-            param_names += list(frozen_param_shapes.keys())
-
-        # handle shared params
-        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
-
-        ds_version = state_dict.get(DS_VERSION, None)
-
-        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
-
-        z_model_state = zero_model_state(buffers=buffers,
-                                         param_shapes=param_shapes,
-                                         shared_params=shared_params,
-                                         ds_version=ds_version,
-                                         frozen_param_shapes=frozen_param_shapes,
-                                         frozen_param_fragments=frozen_param_fragments)
-        zero_model_states.append(z_model_state)
-
-    return zero_model_states
-
-
-def parse_optim_states(files, ds_checkpoint_dir):
-
-    total_files = len(files)
-    state_dicts = []
-    for f in files:
-        state_dict = torch.load(f, map_location=device)
-        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
-        # and also handle the case where it was already removed by another helper script
-        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
-        state_dicts.append(state_dict)
-
-    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
-        raise ValueError(f"{files[0]} is not a zero checkpoint")
-    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
-    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
-
-    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
-    # parameters can be different from data parallelism for non-expert parameters. So we can just
-    # use the max of the partition_count to get the dp world_size.
-
-    if type(world_size) is list:
-        world_size = max(world_size)
-
-    if world_size != total_files:
-        raise ValueError(
-            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
-            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
-        )
-
-    # the groups are named differently in each stage
-    if zero_stage <= 2:
-        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
-    elif zero_stage == 3:
-        fp32_groups_key = FP32_FLAT_GROUPS
-    else:
-        raise ValueError(f"unknown zero stage {zero_stage}")
-
-    if zero_stage <= 2:
-        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
-    elif zero_stage == 3:
-        # if there is more than one param group, there will be multiple flattened tensors - one
-        # flattened tensor per group - for simplicity merge them into a single tensor
-        #
-        # XXX: could make the script more memory efficient for when there are multiple groups - it
-        # will require matching the sub-lists of param_shapes for each param group flattened tensor
-
-        fp32_flat_groups = [
-            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
-        ]
-
-    return zero_stage, world_size, fp32_flat_groups
-
-
-def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
-    """
-    Returns fp32 state_dict reconstructed from ds checkpoint
-
-    Args:
-        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
-
-    """
-    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
-
-    optim_files = get_optim_files(ds_checkpoint_dir)
-    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
-    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
-
-    model_files = get_model_state_files(ds_checkpoint_dir)
-
-    zero_model_states = parse_model_states(model_files)
-    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
-
-    if zero_stage <= 2:
-        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                          exclude_frozen_parameters)
-    elif zero_stage == 3:
-        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                                          exclude_frozen_parameters)
-
-
-def _zero2_merge_frozen_params(state_dict, zero_model_states):
-    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-        return
-
-    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-    frozen_param_fragments = zero_model_states[0].frozen_param_fragments
-
-    if debug:
-        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
-        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-    wanted_params = len(frozen_param_shapes)
-    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
-    print(f'Frozen params: Have {avail_numel} numels to process.')
-    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-    total_params = 0
-    total_numel = 0
-    for name, shape in frozen_param_shapes.items():
-        total_params += 1
-        unpartitioned_numel = shape.numel()
-        total_numel += unpartitioned_numel
-
-        state_dict[name] = frozen_param_fragments[name]
-
-        if debug:
-            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-
-    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _has_callable(obj, fn):
-    attr = getattr(obj, fn, None)
-    return callable(attr)
-
-
-def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-    param_shapes = zero_model_states[0].param_shapes
-
-    # Reconstruction protocol:
-    #
-    # XXX: document this
-
-    if debug:
-        for i in range(world_size):
-            for j in range(len(fp32_flat_groups[0])):
-                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
-
-    # XXX: memory usage doubles here (zero2)
-    num_param_groups = len(fp32_flat_groups[0])
-    merged_single_partition_of_fp32_groups = []
-    for i in range(num_param_groups):
-        merged_partitions = [sd[i] for sd in fp32_flat_groups]
-        full_single_fp32_vector = torch.cat(merged_partitions, 0)
-        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
-    avail_numel = sum(
-        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
-
-    if debug:
-        wanted_params = sum([len(shapes) for shapes in param_shapes])
-        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
-        # not asserting if there is a mismatch due to possible padding
-        print(f"Have {avail_numel} numels to process.")
-        print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-    # params
-    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-    # out-of-core computing solution
-    total_numel = 0
-    total_params = 0
-    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
-        offset = 0
-        avail_numel = full_single_fp32_vector.numel()
-        for name, shape in shapes.items():
-
-            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
-            total_numel += unpartitioned_numel
-            total_params += 1
-
-            if debug:
-                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
-            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
-            offset += unpartitioned_numel
-
-        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
-        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
-        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
-        # live optimizer object, so we are checking that the numbers are within the right range
-        align_to = 2 * world_size
-
-        def zero2_align(x):
-            return align_to * math.ceil(x / align_to)
-
-        if debug:
-            print(f"original offset={offset}, avail_numel={avail_numel}")
-
-        offset = zero2_align(offset)
-        avail_numel = zero2_align(avail_numel)
-
-        if debug:
-            print(f"aligned offset={offset}, avail_numel={avail_numel}")
-
-        # Sanity check
-        if offset != avail_numel:
-            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                               exclude_frozen_parameters):
-    state_dict = OrderedDict()
-
-    # buffers
-    buffers = zero_model_states[0].buffers
-    state_dict.update(buffers)
-    if debug:
-        print(f"added {len(buffers)} buffers")
-
-    if not exclude_frozen_parameters:
-        _zero2_merge_frozen_params(state_dict, zero_model_states)
-
-    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-    # recover shared parameters
-    for pair in zero_model_states[0].shared_params:
-        if pair[1] in state_dict:
-            state_dict[pair[0]] = state_dict[pair[1]]
-
-    return state_dict
-
-
-def zero3_partitioned_param_info(unpartitioned_numel, world_size):
-    remainder = unpartitioned_numel % world_size
-    padding_numel = (world_size - remainder) if remainder else 0
-    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
-    return partitioned_numel, padding_numel
-
-
-def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
-    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
-        return
-
-    if debug:
-        for i in range(world_size):
-            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
-            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
-
-    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
-    wanted_params = len(frozen_param_shapes)
-    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
-    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
-    print(f'Frozen params: Have {avail_numel} numels to process.')
-    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
-
-    total_params = 0
-    total_numel = 0
-    for name, shape in zero_model_states[0].frozen_param_shapes.items():
-        total_params += 1
-        unpartitioned_numel = shape.numel()
-        total_numel += unpartitioned_numel
-
-        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
-        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
-
-        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-        if debug:
-            print(
-                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-            )
-
-    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
-    param_shapes = zero_model_states[0].param_shapes
-    avail_numel = fp32_flat_groups[0].numel() * world_size
-    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
-    # param, re-consolidating each param, while dealing with padding if any
-
-    # merge list of dicts, preserving order
-    param_shapes = {k: v for d in param_shapes for k, v in d.items()}
-
-    if debug:
-        for i in range(world_size):
-            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
-
-    wanted_params = len(param_shapes)
-    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
-    # not asserting if there is a mismatch due to possible padding
-    avail_numel = fp32_flat_groups[0].numel() * world_size
-    print(f"Trainable params: Have {avail_numel} numels to process.")
-    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
-
-    # params
-    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-    # out-of-core computing solution
-    offset = 0
-    total_numel = 0
-    total_params = 0
-    for name, shape in param_shapes.items():
-
-        unpartitioned_numel = shape.numel()
-        total_numel += unpartitioned_numel
-        total_params += 1
-
-        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-        if debug:
-            print(
-                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-            )
-
-        # XXX: memory usage doubles here
-        state_dict[name] = torch.cat(
-            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
-            0).narrow(0, 0, unpartitioned_numel).view(shape)
-        offset += partitioned_numel
-
-    offset *= world_size
-
-    # Sanity check
-    if offset != avail_numel:
-        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
-
-
-def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
-                                               exclude_frozen_parameters):
-    state_dict = OrderedDict()
-
-    # buffers
-    buffers = zero_model_states[0].buffers
-    state_dict.update(buffers)
-    if debug:
-        print(f"added {len(buffers)} buffers")
-
-    if not exclude_frozen_parameters:
-        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
-
-    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
-
-    # recover shared parameters
-    for pair in zero_model_states[0].shared_params:
-        if pair[1] in state_dict:
-            state_dict[pair[0]] = state_dict[pair[1]]
-
-    return state_dict
-
-
-def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
-    """
-    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
-    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
-    via a model hub.
-
-    Args:
-        - ``checkpoint_dir``: path to the desired checkpoint folder
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
-        - ``exclude_frozen_parameters``: exclude frozen parameters
-
-    Returns:
-        - pytorch ``state_dict``
-
-    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
-    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
-    the checkpoint.
-
-    A typical usage might be ::
-
-        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
-        # do the training and checkpoint saving
-        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
-        model = model.cpu() # move to cpu
-        model.load_state_dict(state_dict)
-        # submit to model hub or save the model to share with others
-
-    In this example the ``model`` will no longer be usable in the deepspeed context of the same
-    application. i.e. you will need to re-initialize the deepspeed engine, since
-    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
-
-    """
-    if tag is None:
-        latest_path = os.path.join(checkpoint_dir, 'latest')
-        if os.path.isfile(latest_path):
-            with open(latest_path, 'r') as fd:
-                tag = fd.read().strip()
-        else:
-            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-    if not os.path.isdir(ds_checkpoint_dir):
-        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
-
-
-def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
-    """
-    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-    Args:
-        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-        - ``exclude_frozen_parameters``: exclude frozen parameters
-    """
-
-    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
-    print(f"Saving fp32 state dict to {output_file}")
-    torch.save(state_dict, output_file)
-
-
-def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-    """
-    1. Put the provided model to cpu
-    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-    3. Load it into the provided model
-
-    Args:
-        - ``model``: the model object to update
-        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-
-    Returns:
-        - ``model`: modified model
-
-    Make sure you have plenty of CPU memory available before you call this function. If you don't
-    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-    conveniently placed for you in the checkpoint folder.
-
-    A typical usage might be ::
-
-        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-        # submit to model hub or save the model to share with others
-
-    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
-    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
-    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-    """
-    logger.info(f"Extracting fp32 weights")
-    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-    logger.info(f"Overwriting model with fp32 weights")
-    model = model.cpu()
-    model.load_state_dict(state_dict, strict=False)
-
-    return model
-
-
-if __name__ == "__main__":
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("checkpoint_dir",
-                        type=str,
-                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-    parser.add_argument(
-        "output_file",
-        type=str,
-        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
-    parser.add_argument("-t",
-                        "--tag",
-                        type=str,
-                        default=None,
-                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
-    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
-    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-    args = parser.parse_args()
-
-    debug = args.debug
-
-    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
-                                               args.output_file,
-                                               tag=args.tag,
-                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
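The file removed above is DeepSpeed's stock zero_to_fp32.py converter, which DeepSpeed copies into every checkpoint folder; its header comment and docstrings spell out the two intended entry points. A short sketch of both, assuming the layout above (a checkpoint folder whose latest file names a global_step13312 tag directory); the offline form is the one-liner from the script's own header:

    # Offline, from inside the checkpoint folder:
    #   cd smoe_vcr/checkpoint-13312 && python zero_to_fp32.py . pytorch_model.bin
    #
    # In-process, per the docstring above (`model` is assumed to be the trained
    # torch.nn.Module; after load_state_dict it is no longer tied to DeepSpeed):
    from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    state_dict = get_fp32_state_dict_from_zero_checkpoint("smoe_vcr/checkpoint-13312")
    model = model.cpu()
    model.load_state_dict(state_dict)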
diff --git a/smoe_vcr/checkpoint-16632/latest b/smoe_vcr/checkpoint-16632/latest
deleted file mode 100644
index e4087b037c4d90a88f08b57160ddc65e74a0c271..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/latest
+++ /dev/null
@@ -1 +0,0 @@
-global_step16632
\ No newline at end of file
diff --git a/smoe_vcr/checkpoint-16632/model-00001-of-00002.safetensors b/smoe_vcr/checkpoint-16632/model-00001-of-00002.safetensors
deleted file mode 100644
index a2e039e1186da0f5a2cb583c8aef3d2973e65d01..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/model-00001-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b980095539e1791498907a3250d8c638d9f7efb6c942efaa1efaab69e6631208
-size 4972489328
diff --git a/smoe_vcr/checkpoint-16632/model-00002-of-00002.safetensors b/smoe_vcr/checkpoint-16632/model-00002-of-00002.safetensors
deleted file mode 100644
index 332e627d0fccd3f23ca5f6d0e149534c99da3bee..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/model-00002-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:da33b82157a6d9d3f145750aa1ffe39fa400f5186ca3e89728d628e59816cda0
-size 3759015720
diff --git a/smoe_vcr/checkpoint-16632/rng_state_0.pth b/smoe_vcr/checkpoint-16632/rng_state_0.pth
deleted file mode 100644
index 9231f69f5fd461899867106a669ce247e70c72c2..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/rng_state_0.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f9f23d807f0e704f4ca79670a6631cbff43189cf7f8ff4e1fc0a4330e636a798
-size 14960
diff --git a/smoe_vcr/checkpoint-16632/rng_state_1.pth b/smoe_vcr/checkpoint-16632/rng_state_1.pth
deleted file mode 100644
index 19fe2dcc766f192ea5de79cec4dcff17172a10f7..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/rng_state_1.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1d37f92f6aea5386e84d2d64a1a25d6ef96a10b3bbbfe63627981604c8934076
-size 14960
diff --git a/smoe_vcr/checkpoint-16632/rng_state_2.pth b/smoe_vcr/checkpoint-16632/rng_state_2.pth
deleted file mode 100644
index bfe492519c6b79b07a8d68b98c5f3d0c073667aa..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/rng_state_2.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:667ebf727735115f00a6bdbe090344e9846c726d11bb555cdc201c415f27ad85
-size 14960
diff --git a/smoe_vcr/checkpoint-16632/rng_state_3.pth b/smoe_vcr/checkpoint-16632/rng_state_3.pth
deleted file mode 100644
index 838d42ad13e30851fdbd1d8801738a4106a9ce8b..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/rng_state_3.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:49d306f8c511cba8a225e3b723c5fa79d8a6ecc922f834da914ff0780c78b1fc
-size 14960
diff --git a/smoe_vcr/checkpoint-16632/tokenizer.model b/smoe_vcr/checkpoint-16632/tokenizer.model
deleted file mode 100644
index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/tokenizer.model
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
diff --git a/smoe_vcr/checkpoint-16632/training_args.bin b/smoe_vcr/checkpoint-16632/training_args.bin
deleted file mode 100644
index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/training_args.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe
-size 7992
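The zero_to_fp32.py deleted next is byte-identical to the copy above: both deletions carry the same blob id 24cc342e78d1. One detail of that script worth a worked example: ZeRO-3 reassembly is plain modular arithmetic, and zero3_partitioned_param_info pads each parameter so it splits evenly across ranks. A sketch below, assuming the world size of 4 suggested by the four rng_state_*.pth files per checkpoint:

    import math

    def zero3_partitioned_param_info(unpartitioned_numel, world_size):
        # Same logic as the deleted script: per-rank slice size plus trailing padding.
        remainder = unpartitioned_numel % world_size
        padding_numel = (world_size - remainder) if remainder else 0
        partitioned_numel = math.ceil(unpartitioned_numel / world_size)
        return partitioned_numel, padding_numel

    # A 10-element tensor on 4 ranks: 3 numels per rank, 12 stored in total,
    # of which 2 are padding that the merge step later slices away.
    assert zero3_partitioned_param_info(10, 4) == (3, 2)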
diff --git a/smoe_vcr/checkpoint-16632/zero_to_fp32.py b/smoe_vcr/checkpoint-16632/zero_to_fp32.py
deleted file mode 100644
index 24cc342e78d1a006c782b3a4cd68d9ce786d8fd8..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-16632/zero_to_fp32.py
+++ /dev/null
@@ -1,604 +0,0 @@
diff --git a/smoe_vcr/checkpoint-3328/latest b/smoe_vcr/checkpoint-3328/latest
deleted file mode 100644
index 2c27d5aabecd1a20f5d8e01a05251ed2cf0a7fec..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/latest
+++ /dev/null
@@ -1 +0,0 @@
-global_step3328
\ No newline at end of file
diff --git a/smoe_vcr/checkpoint-3328/model-00001-of-00002.safetensors b/smoe_vcr/checkpoint-3328/model-00001-of-00002.safetensors
deleted file mode 100644
index d65469b9b173abc2e0219205d2769e34df1f9475..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/model-00001-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7cd3580f5ef232bb8b4dbb925ef4d70dabfd796ce8ee29219ef113aac8f8799a
-size 4972489328
diff --git a/smoe_vcr/checkpoint-3328/model-00002-of-00002.safetensors b/smoe_vcr/checkpoint-3328/model-00002-of-00002.safetensors
deleted file mode 100644
index 681d0e2b7da0715392acfb3b7e563729dbb26080..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/model-00002-of-00002.safetensors
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ecdaaf1be9653d016d6ab7aabeb7615950b7c0f5c5f958421ee21255e0140483
-size 3759015720
diff --git a/smoe_vcr/checkpoint-3328/rng_state_0.pth b/smoe_vcr/checkpoint-3328/rng_state_0.pth
deleted file mode 100644
index 9231f69f5fd461899867106a669ce247e70c72c2..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/rng_state_0.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f9f23d807f0e704f4ca79670a6631cbff43189cf7f8ff4e1fc0a4330e636a798
-size 14960
diff --git a/smoe_vcr/checkpoint-3328/rng_state_1.pth b/smoe_vcr/checkpoint-3328/rng_state_1.pth
deleted file mode 100644
index 19fe2dcc766f192ea5de79cec4dcff17172a10f7..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/rng_state_1.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1d37f92f6aea5386e84d2d64a1a25d6ef96a10b3bbbfe63627981604c8934076
-size 14960
diff --git a/smoe_vcr/checkpoint-3328/rng_state_2.pth b/smoe_vcr/checkpoint-3328/rng_state_2.pth
deleted file mode 100644
index bfe492519c6b79b07a8d68b98c5f3d0c073667aa..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/rng_state_2.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:667ebf727735115f00a6bdbe090344e9846c726d11bb555cdc201c415f27ad85
-size 14960
diff --git a/smoe_vcr/checkpoint-3328/rng_state_3.pth b/smoe_vcr/checkpoint-3328/rng_state_3.pth
deleted file mode 100644
index 838d42ad13e30851fdbd1d8801738a4106a9ce8b..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/rng_state_3.pth
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:49d306f8c511cba8a225e3b723c5fa79d8a6ecc922f834da914ff0780c78b1fc
-size 14960
diff --git a/smoe_vcr/checkpoint-3328/tokenizer.model b/smoe_vcr/checkpoint-3328/tokenizer.model
deleted file mode 100644
index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/tokenizer.model
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
-size 499723
diff --git a/smoe_vcr/checkpoint-3328/training_args.bin b/smoe_vcr/checkpoint-3328/training_args.bin
deleted file mode 100644
index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/training_args.bin
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe
-size 7992
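The one-line latest files deleted above are the glue between a checkpoint folder and its DeepSpeed tag directory: when no --tag is passed, the script resolves the tag (here global_step3328) from latest before descending into that sub-folder. A sketch mirroring that lookup; resolve_checkpoint_tag is an illustrative name, not a function from the script:

    import os

    def resolve_checkpoint_tag(checkpoint_dir, tag=None):
        # Mirrors the tag resolution in zero_to_fp32.py: fall back to the
        # one-line 'latest' file when no tag is given.
        if tag is None:
            with open(os.path.join(checkpoint_dir, "latest")) as fd:
                tag = fd.read().strip()
        return os.path.join(checkpoint_dir, tag)

    # resolve_checkpoint_tag("smoe_vcr/checkpoint-3328")
    # -> "smoe_vcr/checkpoint-3328/global_step3328"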
diff --git a/smoe_vcr/checkpoint-3328/zero_to_fp32.py b/smoe_vcr/checkpoint-3328/zero_to_fp32.py
deleted file mode 100644
index 24cc342e78d1a006c782b3a4cd68d9ce786d8fd8..0000000000000000000000000000000000000000
--- a/smoe_vcr/checkpoint-3328/zero_to_fp32.py
+++ /dev/null
@@ -1,604 +0,0 @@
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - 
frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _has_callable(obj, fn): - attr = getattr(obj, fn, None) - return callable(attr) - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * 
world_size - # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
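For the checkpoints in this repo, an illustrative programmatic equivalent of that offline route (a sketch; assumes the checkpoint folder's copy of the script is importable, with the tag taken from the ``latest`` file recorded in this diff) ::

    # same effect as: python zero_to_fp32.py . pytorch_model.bin -t global_step3328
    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
    convert_zero_checkpoint_to_fp32_state_dict(
        "smoe_vcr/checkpoint-3328", "pytorch_model.bin", tag="global_step3328")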
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. 
i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, - args.output_file, - tag=args.tag, - exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/smoe_vcr/checkpoint-6656/latest b/smoe_vcr/checkpoint-6656/latest deleted file mode 100644 index 24f37f789c4e6eb86270647db8ff45788e484aa2..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/latest +++ /dev/null @@ -1 +0,0 @@ -global_step6656 \ No newline at end of file diff --git a/smoe_vcr/checkpoint-6656/model-00001-of-00002.safetensors b/smoe_vcr/checkpoint-6656/model-00001-of-00002.safetensors deleted file mode 100644 index 7cfbba86c1e8058983bf1495bcc852e92b4bbaef..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/model-00001-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d89657f3f9f5a138987ff2a2ad3b59b04956a660f4b56c50ff2af692c1d5a62d -size 4972489328 diff --git a/smoe_vcr/checkpoint-6656/model-00002-of-00002.safetensors b/smoe_vcr/checkpoint-6656/model-00002-of-00002.safetensors deleted file mode 100644 index f9bcc6d0ef875e46a01807424c7de1b8cb9e2c81..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/model-00002-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9b77dcb153143b1e8e406b442235536ae4600ce71ba52a5be3a61e751b21d547 -size 3759015720 diff --git a/smoe_vcr/checkpoint-6656/rng_state_0.pth b/smoe_vcr/checkpoint-6656/rng_state_0.pth deleted file mode 100644 index 9231f69f5fd461899867106a669ce247e70c72c2..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f9f23d807f0e704f4ca79670a6631cbff43189cf7f8ff4e1fc0a4330e636a798 -size 14960 diff --git a/smoe_vcr/checkpoint-6656/rng_state_1.pth b/smoe_vcr/checkpoint-6656/rng_state_1.pth deleted file mode 100644 index 19fe2dcc766f192ea5de79cec4dcff17172a10f7..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1d37f92f6aea5386e84d2d64a1a25d6ef96a10b3bbbfe63627981604c8934076 -size 14960 diff --git a/smoe_vcr/checkpoint-6656/rng_state_2.pth b/smoe_vcr/checkpoint-6656/rng_state_2.pth deleted file mode 100644 
index bfe492519c6b79b07a8d68b98c5f3d0c073667aa..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:667ebf727735115f00a6bdbe090344e9846c726d11bb555cdc201c415f27ad85 -size 14960 diff --git a/smoe_vcr/checkpoint-6656/rng_state_3.pth b/smoe_vcr/checkpoint-6656/rng_state_3.pth deleted file mode 100644 index 838d42ad13e30851fdbd1d8801738a4106a9ce8b..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:49d306f8c511cba8a225e3b723c5fa79d8a6ecc922f834da914ff0780c78b1fc -size 14960 diff --git a/smoe_vcr/checkpoint-6656/tokenizer.model b/smoe_vcr/checkpoint-6656/tokenizer.model deleted file mode 100644 index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 -size 499723 diff --git a/smoe_vcr/checkpoint-6656/training_args.bin b/smoe_vcr/checkpoint-6656/training_args.bin deleted file mode 100644 index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe -size 7992 diff --git a/smoe_vcr/checkpoint-6656/zero_to_fp32.py b/smoe_vcr/checkpoint-6656/zero_to_fp32.py deleted file mode 100644 index 24cc342e78d1a006c782b3a4cd68d9ce786d8fd8..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-6656/zero_to_fp32.py +++ /dev/null @@ -1,604 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. 
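# Why the natural sort below matters, as a self-contained sketch (_natural_keys is a
# hypothetical stand-in mirroring the natural_keys helper defined further down):
# lexicographic order would put rank 10 before rank 2 when globbing per-rank files.
import re

def _natural_keys(text):
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', text)]

ranks = ["rank_10", "rank_2", "rank_1"]
assert sorted(ranks) == ["rank_1", "rank_10", "rank_2"]                      # plain sort: wrong
assert sorted(ranks, key=_natural_keys) == ["rank_1", "rank_2", "rank_10"]   # human order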
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights - # and also handle the case where it was already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have different partition_count as data parallelism for expert - # parameters can be different from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if type(world_size) is list: - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " - "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): - """ - Returns fp32 state_dict reconstructed from ds checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - 
frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _has_callable(obj, fn): - attr = getattr(obj, fn, None) - return callable(attr) - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * 
world_size - # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory and - you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with - the checkpoint. 
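A rough way to gauge that CPU-memory headroom for the checkpoints in this repo (a sketch; assumes the two safetensors shards recorded above hold 16-bit weights, so fp32 needs twice their bytes) ::

    shard_bytes = 4972489328 + 3759015720   # shard sizes recorded in this diff
    fp32_bytes = 2 * shard_bytes            # fp32 doubles 16-bit storage
    print(f"~{fp32_bytes / 2**30:.1f} GiB of free CPU RAM needed")   # ~16.3 GiB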
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False): - """ - Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model to cpu - 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model`: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context - of the same application. 
i.e. you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - """ - logger.info(f"Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info(f"Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") - parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, - args.output_file, - tag=args.tag, - exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/smoe_vcr/checkpoint-9984/latest b/smoe_vcr/checkpoint-9984/latest deleted file mode 100644 index 6c2bd85bc7c6d33e172c9c565d8517bade9572ca..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/latest +++ /dev/null @@ -1 +0,0 @@ -global_step9984 \ No newline at end of file diff --git a/smoe_vcr/checkpoint-9984/model-00001-of-00002.safetensors b/smoe_vcr/checkpoint-9984/model-00001-of-00002.safetensors deleted file mode 100644 index b07c2ae523340ce4cb7072e1fb9b706974fba8cd..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/model-00001-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1c2d8d9707620fd53145a82126507abbf6cad7994d252b0d44078afa1c2d09f3 -size 4972489328 diff --git a/smoe_vcr/checkpoint-9984/model-00002-of-00002.safetensors b/smoe_vcr/checkpoint-9984/model-00002-of-00002.safetensors deleted file mode 100644 index 2b21b54eba05ac04c35bc185da22adcbb63b4172..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/model-00002-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e36b4b833ff0dbd2e5e9972d3eea8b6d3426b3957a3d220d4339993610aa1ac6 -size 3759015720 diff --git a/smoe_vcr/checkpoint-9984/rng_state_0.pth b/smoe_vcr/checkpoint-9984/rng_state_0.pth deleted file mode 100644 index 9231f69f5fd461899867106a669ce247e70c72c2..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/rng_state_0.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f9f23d807f0e704f4ca79670a6631cbff43189cf7f8ff4e1fc0a4330e636a798 -size 14960 diff --git a/smoe_vcr/checkpoint-9984/rng_state_1.pth b/smoe_vcr/checkpoint-9984/rng_state_1.pth deleted file mode 100644 index 19fe2dcc766f192ea5de79cec4dcff17172a10f7..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/rng_state_1.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:1d37f92f6aea5386e84d2d64a1a25d6ef96a10b3bbbfe63627981604c8934076 -size 14960 diff --git a/smoe_vcr/checkpoint-9984/rng_state_2.pth b/smoe_vcr/checkpoint-9984/rng_state_2.pth deleted file mode 100644 
index bfe492519c6b79b07a8d68b98c5f3d0c073667aa..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/rng_state_2.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:667ebf727735115f00a6bdbe090344e9846c726d11bb555cdc201c415f27ad85 -size 14960 diff --git a/smoe_vcr/checkpoint-9984/rng_state_3.pth b/smoe_vcr/checkpoint-9984/rng_state_3.pth deleted file mode 100644 index 838d42ad13e30851fdbd1d8801738a4106a9ce8b..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/rng_state_3.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:49d306f8c511cba8a225e3b723c5fa79d8a6ecc922f834da914ff0780c78b1fc -size 14960 diff --git a/smoe_vcr/checkpoint-9984/tokenizer.model b/smoe_vcr/checkpoint-9984/tokenizer.model deleted file mode 100644 index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 -size 499723 diff --git a/smoe_vcr/checkpoint-9984/training_args.bin b/smoe_vcr/checkpoint-9984/training_args.bin deleted file mode 100644 index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe -size 7992 diff --git a/smoe_vcr/checkpoint-9984/zero_to_fp32.py b/smoe_vcr/checkpoint-9984/zero_to_fp32.py deleted file mode 100644 index 24cc342e78d1a006c782b3a4cd68d9ce786d8fd8..0000000000000000000000000000000000000000 --- a/smoe_vcr/checkpoint-9984/zero_to_fp32.py +++ /dev/null @@ -1,604 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets -# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in -# the future. Once extracted, the weights don't require DeepSpeed and can be used in any -# application. -# -# example: python zero_to_fp32.py . pytorch_model.bin - -import argparse -import torch -import glob -import math -import os -import re -from collections import OrderedDict -from dataclasses import dataclass - -# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with -# DeepSpeed data structures it has to be available in the current python environment. 
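# Worked example of the ZeRO-3 partitioning arithmetic used further below
# (a sketch; world_size = 4 matches the four rng_state_*.pth ranks in this diff):
# a 10-element parameter is stored as ceil(10 / 4) = 3 elements per rank,
# i.e. 4 * 3 = 12 slots, of which 12 - 10 = 2 are padding on the last rank.
import math

unpartitioned_numel, world_size = 10, 4
partitioned_numel = math.ceil(unpartitioned_numel / world_size)   # 3
remainder = unpartitioned_numel % world_size
padding_numel = (world_size - remainder) if remainder else 0      # 2
assert world_size * partitioned_numel == unpartitioned_numel + padding_numel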
-from deepspeed.utils import logger -from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, - FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, - FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) - - -@dataclass -class zero_model_state: - buffers: dict() - param_shapes: dict() - shared_params: list - ds_version: int - frozen_param_shapes: dict() - frozen_param_fragments: dict() - - -debug = 0 - -# load to cpu -device = torch.device('cpu') - - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - ''' - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - ''' - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_model_state_file(checkpoint_dir, zero_stage): - if not os.path.isdir(checkpoint_dir): - raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") - - # there should be only one file - if zero_stage <= 2: - file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") - elif zero_stage == 3: - file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") - - if not os.path.exists(file): - raise FileNotFoundError(f"can't find model states file at '{file}'") - - return file - - -def get_checkpoint_files(checkpoint_dir, glob_pattern): - # XXX: need to test that this simple glob rule works for multi-node setup too - ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) - - if len(ckpt_files) == 0: - raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") - - return ckpt_files - - -def get_optim_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") - - -def get_model_state_files(checkpoint_dir): - return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") - - -def parse_model_states(files): - zero_model_states = [] - for file in files: - state_dict = torch.load(file, map_location=device) - - if BUFFER_NAMES not in state_dict: - raise ValueError(f"{file} is not a model state checkpoint") - buffer_names = state_dict[BUFFER_NAMES] - if debug: - print("Found buffers:", buffer_names) - - # recover just the buffers while restoring them to fp32 if they were saved in fp16 - buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} - param_shapes = state_dict[PARAM_SHAPES] - - # collect parameters that are included in param_shapes - param_names = [] - for s in param_shapes: - for name in s.keys(): - param_names.append(name) - - # update with frozen parameters - frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) - if frozen_param_shapes is not None: - if debug: - print(f"Found frozen_param_shapes: {frozen_param_shapes}") - param_names += list(frozen_param_shapes.keys()) - - # handle shared params - shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] - - ds_version = state_dict.get(DS_VERSION, None) - - frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) - - z_model_state = zero_model_state(buffers=buffers, - param_shapes=param_shapes, - shared_params=shared_params, - ds_version=ds_version, - frozen_param_shapes=frozen_param_shapes, - frozen_param_fragments=frozen_param_fragments) - zero_model_states.append(z_model_state) - - return zero_model_states - - -def parse_optim_states(files, ds_checkpoint_dir): - - total_files = len(files) - 
state_dicts = [] - for f in files: - state_dict = torch.load(f, map_location=device) - # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights, - # and also handle the case where they were already removed by another helper script - state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) - state_dicts.append(state_dict) - - if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]: - raise ValueError(f"{files[0]} is not a zero checkpoint") - zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] - world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] - - # For ZeRO-2 each param group can have a different partition_count, as data parallelism for expert - # parameters can differ from data parallelism for non-expert parameters. So we can just - # use the max of the partition_count to get the dp world_size. - - if isinstance(world_size, list): - world_size = max(world_size) - - if world_size != total_files: - raise ValueError( - f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. " - "Possibly an old checkpoint was overwritten, or the checkpoint wasn't saved by one or more processes." - ) - - # the groups are named differently in each stage - if zero_stage <= 2: - fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS - elif zero_stage == 3: - fp32_groups_key = FP32_FLAT_GROUPS - else: - raise ValueError(f"unknown zero stage {zero_stage}") - - if zero_stage <= 2: - fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] - elif zero_stage == 3: - # if there is more than one param group, there will be multiple flattened tensors - one - # flattened tensor per group - for simplicity merge them into a single tensor - # - # XXX: could make the script more memory efficient for when there are multiple groups - it - # will require matching the sub-lists of param_shapes for each param group flattened tensor - - fp32_flat_groups = [ - torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) - ] - - return zero_stage, world_size, fp32_flat_groups - - -def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): - """ - Returns the fp32 state_dict reconstructed from a DeepSpeed checkpoint - - Args: - - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) - - """ - print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") - - optim_files = get_optim_files(ds_checkpoint_dir) - zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) - print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") - - model_files = get_model_state_files(ds_checkpoint_dir) - - zero_model_states = parse_model_states(model_files) - print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') - - if zero_stage <= 2: - return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - elif zero_stage == 3: - return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters) - - -def _zero2_merge_frozen_params(state_dict, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes -
frozen_param_fragments = zero_model_states[0].frozen_param_fragments - - if debug: - num_elem = sum(s.numel() for s in frozen_param_shapes.values()) - print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - state_dict[name] = frozen_param_fragments[name] - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _has_callable(obj, fn): - attr = getattr(obj, fn, None) - return callable(attr) - - -def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - - # Reconstruction protocol: - # - # XXX: document this - - if debug: - for i in range(world_size): - for j in range(len(fp32_flat_groups[0])): - print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") - - # XXX: memory usage doubles here (zero2) - num_param_groups = len(fp32_flat_groups[0]) - merged_single_partition_of_fp32_groups = [] - for i in range(num_param_groups): - merged_partitions = [sd[i] for sd in fp32_flat_groups] - full_single_fp32_vector = torch.cat(merged_partitions, 0) - merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) - avail_numel = sum( - [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) - - if debug: - wanted_params = sum([len(shapes) for shapes in param_shapes]) - wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) - # not asserting if there is a mismatch due to possible padding - print(f"Have {avail_numel} numels to process.") - print(f"Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # out-of-core computing solution - total_numel = 0 - total_params = 0 - for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): - offset = 0 - avail_numel = full_single_fp32_vector.numel() - for name, shape in shapes.items(): - - unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) - total_numel += unpartitioned_numel - total_params += 1 - - if debug: - print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") - state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) - offset += unpartitioned_numel - - # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and - # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex - # paddings performed in the code it's almost impossible to predict the exact numbers w/o the - # live optimizer object, so we are checking that the numbers are within the right range - align_to = 2 * world_size - - def zero2_align(x): - return align_to * math.ceil(x / align_to) - - if debug: - print(f"original offset={offset}, avail_numel={avail_numel}") - - offset = zero2_align(offset) - avail_numel = zero2_align(avail_numel) - - if debug: - print(f"aligned offset={offset}, avail_numel={avail_numel}") - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero2_merge_frozen_params(state_dict, zero_model_states) - - _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def zero3_partitioned_param_info(unpartitioned_numel, world_size): - remainder = unpartitioned_numel % world_size - padding_numel = (world_size - remainder) if remainder else 0 - partitioned_numel = math.ceil(unpartitioned_numel / world_size) - return partitioned_numel, padding_numel - - -def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): - if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: - return - - if debug: - for i in range(world_size): - num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) - print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') - - frozen_param_shapes = zero_model_states[0].frozen_param_shapes - wanted_params = len(frozen_param_shapes) - wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) - avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size - print(f'Frozen params: Have {avail_numel} numels to process.') - print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') - - total_params = 0 - total_numel = 0 - for name, shape in zero_model_states[0].frozen_param_shapes.items(): - total_params += 1 - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - - param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) - state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") - - -def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): - param_shapes = zero_model_states[0].param_shapes - avail_numel = fp32_flat_groups[0].numel() * 
world_size - # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of each - # param, re-consolidating each param, while dealing with padding if any - - # merge list of dicts, preserving order - param_shapes = {k: v for d in param_shapes for k, v in d.items()} - - if debug: - for i in range(world_size): - print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") - - wanted_params = len(param_shapes) - wanted_numel = sum(shape.numel() for shape in param_shapes.values()) - # not asserting if there is a mismatch due to possible padding - avail_numel = fp32_flat_groups[0].numel() * world_size - print(f"Trainable params: Have {avail_numel} numels to process.") - print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") - - # params - # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support - # an out-of-core computing solution - offset = 0 - total_numel = 0 - total_params = 0 - for name, shape in param_shapes.items(): - - unpartitioned_numel = shape.numel() - total_numel += unpartitioned_numel - total_params += 1 - - partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) - - if debug: - print( - f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" - ) - - # XXX: memory usage doubles here - state_dict[name] = torch.cat( - tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), - 0).narrow(0, 0, unpartitioned_numel).view(shape) - offset += partitioned_numel - - offset *= world_size - - # Sanity check - if offset != avail_numel: - raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") - - print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") - - -def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, - exclude_frozen_parameters): - state_dict = OrderedDict() - - # buffers - buffers = zero_model_states[0].buffers - state_dict.update(buffers) - if debug: - print(f"added {len(buffers)} buffers") - - if not exclude_frozen_parameters: - _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) - - _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) - - # recover shared parameters - for pair in zero_model_states[0].shared_params: - if pair[1] in state_dict: - state_dict[pair[0]] = state_dict[pair[1]] - - return state_dict - - -def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False): - """ - Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with - ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example - via a model hub. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder - - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the 'latest' file, e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - - Returns: - - pytorch ``state_dict`` - - Note: this approach may not work if your application doesn't have sufficient free CPU memory, and - you may need to use the offline approach via the ``zero_to_fp32.py`` script that is saved with - the checkpoint.
- - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint - # do the training and checkpoint saving - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu - model = model.cpu() # move to cpu - model.load_state_dict(state_dict) - # submit to model hub or save the model to share with others - - In this example the ``model`` will no longer be usable in the deepspeed context of the same - application. That is, you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. - - """ - if tag is None: - latest_path = os.path.join(checkpoint_dir, 'latest') - if os.path.isfile(latest_path): - with open(latest_path, 'r') as fd: - tag = fd.read().strip() - else: - raise ValueError(f"Unable to find 'latest' file at {latest_path}") - - ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) - - if not os.path.isdir(ds_checkpoint_dir): - raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") - - return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) - - -def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False): - """ - Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be - loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. - - Args: - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - ``exclude_frozen_parameters``: exclude frozen parameters - """ - - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters) - print(f"Saving fp32 state dict to {output_file}") - torch.save(state_dict, output_file) - - -def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): - """ - 1. Put the provided model on the CPU - 2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` - 3. Load it into the provided model - - Args: - - ``model``: the model object to update - - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` - - Returns: - - ``model``: modified model - - Make sure you have plenty of CPU memory available before you call this function. If you don't - have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it - conveniently placed for you in the checkpoint folder. - - A typical usage might be :: - - from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint - model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) - # submit to model hub or save the model to share with others - - Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context - of the same application.
That is, you will need to re-initialize the deepspeed engine, since - ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. - - """ - logger.info("Extracting fp32 weights") - state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) - - logger.info("Overwriting model with fp32 weights") - model = model.cpu() - model.load_state_dict(state_dict, strict=False) - - return model - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("checkpoint_dir", - type=str, - help="path to the desired checkpoint folder, e.g., path/checkpoint-12") - parser.add_argument( - "output_file", - type=str, - help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") - parser.add_argument("-t", - "--tag", - type=str, - default=None, - help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1") - parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") - parser.add_argument("-d", "--debug", action='store_true', help="enable debug") - args = parser.parse_args() - - debug = args.debug - - convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, - args.output_file, - tag=args.tag, - exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/smoe_vcr/model-00001-of-00002.safetensors b/smoe_vcr/model-00001-of-00002.safetensors deleted file mode 100644 index a2e039e1186da0f5a2cb583c8aef3d2973e65d01..0000000000000000000000000000000000000000 --- a/smoe_vcr/model-00001-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b980095539e1791498907a3250d8c638d9f7efb6c942efaa1efaab69e6631208 -size 4972489328 diff --git a/smoe_vcr/model-00002-of-00002.safetensors b/smoe_vcr/model-00002-of-00002.safetensors deleted file mode 100644 index 332e627d0fccd3f23ca5f6d0e149534c99da3bee..0000000000000000000000000000000000000000 --- a/smoe_vcr/model-00002-of-00002.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da33b82157a6d9d3f145750aa1ffe39fa400f5186ca3e89728d628e59816cda0 -size 3759015720 diff --git a/smoe_vcr/tokenizer.model b/smoe_vcr/tokenizer.model deleted file mode 100644 index 6c00c742ce03c627d6cd5b795984876fa49fa899..0000000000000000000000000000000000000000 --- a/smoe_vcr/tokenizer.model +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 -size 499723 diff --git a/smoe_vcr/training_args.bin b/smoe_vcr/training_args.bin deleted file mode 100644 index a6bf5dd8c2124ae4390fcff1f839a55a05673fb5..0000000000000000000000000000000000000000 --- a/smoe_vcr/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e04cbeb93b4abbbbbea1e73f341a6942676570aa79d4df252055868d2df91fe -size 7992
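
Usage note: the checkpoint folder removed above shipped its own copy of the zero_to_fp32.py helper shown in this diff, so its ZeRO shards could be consolidated without the training code. The sketch below is assembled from the helper's own docstrings and is only illustrative: the checkpoint path points at a folder this diff deletes (it would work only on a checkout predating this change), and DeepSpeed must be installed. A typical usage might be ::

    # Sketch of the documented consolidation workflow (paths illustrative).
    # CLI equivalent, per the script header: python zero_to_fp32.py . pytorch_model.bin
    from deepspeed.utils.zero_to_fp32 import (
        convert_zero_checkpoint_to_fp32_state_dict,
        get_fp32_state_dict_from_zero_checkpoint,
    )

    checkpoint_dir = "smoe_vcr/checkpoint-9984"  # deleted by this diff; for illustration only

    # Offline conversion: resolves the tag from the 'latest' file and writes a
    # single consolidated fp32 file loadable with torch.load() + load_state_dict().
    convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, "pytorch_model.bin")

    # In-memory alternative: reconstruct the fp32 state_dict and load it into a
    # CPU copy of the model, after which DeepSpeed is no longer required.
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
    # model = model.cpu(); model.load_state_dict(state_dict)

Both paths deliberately run on the CPU (the script maps every shard to torch.device('cpu')), so consolidation needs enough host RAM to hold the full fp32 model but no GPU.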