python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import time
import logging
import jwt
import json
from verifier.attestation import AttestationReport
from verifier.rim import RIM
from verifier.nvml import (
NvmlHandler,
NvmlHandlerTest,
)
from verifier.verifier import Verifier
from verifier.config import (
BaseSettings,
HopperSettings,
event_log,
info_log,
__author__,
__copyright__,
__version__,
)
from verifier.exceptions import (
Error,
RIMFetchError,
NoGpuFoundError,
UnsupportedGpuArchitectureError,
CertChainVerificationFailureError,
AttestationReportVerificationError,
RIMVerificationFailureError,
UnknownGpuArchitectureError,
)
from verifier.exceptions.utils import (
is_non_fatal_issue,
need_to_change_gpu_state,
)
from verifier.cc_admin_utils import CcAdminUtils
def main():
    """ CLI entry point: build the argument parser, parse the command line and
    hand the resulting options to attest().

    Returns:
        The (overall status, JWT claims) tuple produced by attest().
    """
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument("-v", "--verbose",
                            action="store_true",
                            help="Print more detailed output.")
    cli_parser.add_argument("--test_no_gpu",
                            action="store_true",
                            help="""If there is no gpu and we
need to test the verifier, then no nvml apis will be available so, the verifier
will use a hardcoded gpu info.""")
    cli_parser.add_argument("--driver_rim",
                            default="/usr/share/nvidia/rim/RIM_GH100PROD.swidtag",
                            help="The path to the driver RIM.")
    cli_parser.add_argument("--vbios_rim",
                            help="The path to the VBIOS RIM.")
    cli_parser.add_argument("--user_mode",
                            action="store_true",
                            help="Runs the gpu attestation in user mode.")
    # attest() expects the options as a plain dictionary.
    return attest(vars(cli_parser.parse_args()))
def attest(arguments_as_dictionary):
    """ Method to perform GPU Attestation and return an Attestation Response.

    Args:
        arguments_as_dictionary (Dictionary): the dictionary object containing
            Attestation Options; keys used here are 'verbose', 'test_no_gpu',
            'driver_rim', 'vbios_rim' and 'user_mode'.

    Raises:
        Different Errors regarding GPU Attestation.

    Returns:
        A tuple containing Attestation result (boolean) and Attestation JWT
        claims (JWT Object).
    """
    overall_status = False
    verified_claims = {}
    try:
        if arguments_as_dictionary['verbose']:
            info_log.setLevel(logging.DEBUG)
        if arguments_as_dictionary['test_no_gpu']:
            # Test mode: canned GPU info is used instead of the nvml APIs.
            event_log.info("Running in test_no_gpu mode.")
            number_of_available_gpus = NvmlHandlerTest.get_number_of_gpus()
        else:
            event_log.debug("Initializing the nvml library")
            NvmlHandler.init_nvml()
            # Attestation is only meaningful when confidential compute is on.
            if not NvmlHandler.is_cc_enabled():
                err_msg = "The confidential compute feature is disabled !!\nQuitting now."
                raise Error(err_msg)
            if NvmlHandler.is_cc_dev_mode():
                info_log.info("The system is running in CC DevTools mode !!")
            number_of_available_gpus = NvmlHandler.get_number_of_gpus()
            if number_of_available_gpus == 0:
                err_msg = "No GPU found"
                info_log.critical(err_msg)
                raise NoGpuFoundError(err_msg)
        BaseSettings.mark_gpu_as_available()
        info_log.info(f'Number of GPUs available : {number_of_available_gpus}')
        # Attest each GPU in turn; per-GPU progress is recorded in `settings`.
        for i in range(number_of_available_gpus):
            info_log.info("-----------------------------------")
            info_log.info(f'Fetching GPU {i} information from GPU driver.')
            # A fresh nonce is generated for each GPU and sent with the
            # attestation report request.
            nonce_for_attestation_report = CcAdminUtils.generate_nonce(BaseSettings.SIZE_OF_NONCE_IN_BYTES)
            if arguments_as_dictionary['test_no_gpu']:
                # Test mode uses the fixed nonce so the canned report verifies.
                nonce_for_attestation_report = BaseSettings.NONCE
                gpu_info_obj = NvmlHandlerTest(settings=BaseSettings)
            else:
                gpu_info_obj = NvmlHandler(index=i, nonce=nonce_for_attestation_report, settings=BaseSettings)
            if gpu_info_obj.get_gpu_architecture() == 'HOPPER':
                event_log.debug(f'The architecture of the GPU with index {i} is HOPPER')
                settings = HopperSettings()
                if arguments_as_dictionary['driver_rim'] is None and not arguments_as_dictionary['test_no_gpu']:
                    raise RIMFetchError("Driver RIM file path not provided!!")
                HopperSettings.set_driver_rim_path(arguments_as_dictionary['driver_rim'])
                HopperSettings.set_vbios_rim_path(arguments_as_dictionary['vbios_rim'])
                if arguments_as_dictionary['test_no_gpu']:
                    # Test mode overrides any user-supplied RIM paths.
                    HopperSettings.set_driver_rim_path(HopperSettings.TEST_NO_GPU_DRIVER_RIM_PATH)
                    HopperSettings.set_vbios_rim_path(HopperSettings.TEST_NO_GPU_VBIOS_RIM_PATH)
            else:
                err_msg = "Unknown GPU architecture."
                event_log.error(err_msg)
                raise UnknownGpuArchitectureError(err_msg)
            event_log.debug("GPU info fetched successfully.")
            settings.mark_gpu_info_fetched()
            info_log.info(f'VERIFYING GPU : {i}')
            if gpu_info_obj.get_gpu_architecture() != settings.GpuArch:
                err_msg = "\tGPU architecture is not supported."
                event_log.error(err_msg)
                raise UnsupportedGpuArchitectureError(err_msg)
            event_log.debug("\tGPU architecture is correct.")
            settings.mark_gpu_arch_is_correct()
            driver_version = gpu_info_obj.get_driver_version()
            vbios_version = gpu_info_obj.get_vbios_version()
            # Lower-cased to match the case-normalized version in the RIM.
            vbios_version = vbios_version.lower()
            info_log.info(f'\tDriver version fetched : {driver_version}')
            info_log.info(f'\tVBIOS version fetched : {vbios_version}')
            event_log.debug(f'GPU info fetched : \n\t\t{vars(gpu_info_obj)}')
            info_log.info("\tValidating GPU certificate chains.")
            gpu_attestation_cert_chain = gpu_info_obj.get_attestation_cert_chain()
            # A self-signed certificate (issuer == subject) marks the root.
            for certificate in gpu_attestation_cert_chain:
                cert = certificate.to_cryptography()
                issuer = cert.issuer.public_bytes()
                subject = cert.subject.public_bytes()
                if issuer == subject:
                    event_log.debug("Root certificate is a available.")
                    settings.mark_root_cert_available()
            # The leaf (device) certificate is the first entry in the chain.
            gpu_leaf_cert = (gpu_attestation_cert_chain[0])
            event_log.debug("\t\tverifying attestation certificate chain.")
            cert_verification_status = CcAdminUtils.verify_certificate_chain(gpu_attestation_cert_chain,
                                                                             settings,
                                                                             BaseSettings.Certificate_Chain_Verification_Mode.GPU_ATTESTATION)
            if not cert_verification_status:
                err_msg = "\t\tGPU attestation report certificate chain validation failed."
                event_log.error(err_msg)
                raise CertChainVerificationFailureError(err_msg)
            else:
                settings.mark_gpu_cert_chain_verified()
                info_log.info("\t\tGPU attestation report certificate chain validation successful.")
            # The chain is additionally checked for revocation via OCSP.
            cert_chain_revocation_status = CcAdminUtils.ocsp_certificate_chain_validation(gpu_attestation_cert_chain,
                                                                                         settings,
                                                                                         BaseSettings.Certificate_Chain_Verification_Mode.GPU_ATTESTATION)
            if not cert_chain_revocation_status:
                err_msg = "\t\tGPU attestation report certificate chain revocation validation failed."
                event_log.error(err_msg)
                raise CertChainVerificationFailureError(err_msg)
            settings.mark_gpu_cert_check_complete()
            info_log.info("\tAuthenticating attestation report")
            attestation_report_data = gpu_info_obj.get_attestation_report()
            attestation_report_obj = AttestationReport(attestation_report_data, settings)
            attestation_report_obj.print_obj(info_log)
            settings.mark_attestation_report_parsed()
            # The report is checked against the GPU leaf certificate and the
            # nonce / driver / vbios values fetched above.
            attestation_report_verification_status = CcAdminUtils.verify_attestation_report(
                attestation_report_obj=attestation_report_obj,
                gpu_leaf_certificate=gpu_leaf_cert,
                nonce=nonce_for_attestation_report,
                driver_version=driver_version,
                vbios_version=vbios_version,
                settings=settings)
            if attestation_report_verification_status:
                settings.mark_attestation_report_verified()
                info_log.info("\t\tAttestation report verification successful.")
            else:
                err_msg = "\t\tAttestation report verification failed."
                event_log.error(err_msg)
                raise AttestationReportVerificationError(err_msg)
            # performing the schema validation and signature verification of the driver RIM.
            info_log.info("\tAuthenticating the RIMs.")
            info_log.info("\t\tAuthenticating Driver RIM")
            driver_rim = RIM(settings.DRIVER_RIM_PATH, rim_name='driver', settings=settings)
            driver_rim_verification_status = driver_rim.verify(version=driver_version, settings=settings)
            if driver_rim_verification_status:
                settings.mark_driver_rim_signature_verified()
                info_log.info("\t\t\tDriver RIM verification successful")
            else:
                event_log.error("\t\t\tDriver RIM verification failed.")
                raise RIMVerificationFailureError("\t\t\tDriver RIM verification failed.\n\t\t\tQuitting now.")
            # performing the schema validation and signature verification of the vbios RIM.
            info_log.info("\t\tAuthenticating VBIOS RIM.")
            vbios_rim_path = settings.VBIOS_RIM_PATH
            # When no VBIOS RIM was given on the command line, locate the one
            # matching this attestation report.
            if arguments_as_dictionary["vbios_rim"] is None and not arguments_as_dictionary['test_no_gpu']:
                vbios_rim_path = CcAdminUtils.get_vbios_rim_path(settings, attestation_report_obj)
            vbios_rim = RIM(vbios_rim_path, rim_name='vbios', settings=settings)
            vbios_rim_verification_status = vbios_rim.verify(version=vbios_version, settings=settings)
            if vbios_rim_verification_status:
                settings.mark_vbios_rim_signature_verified()
                info_log.info("\t\t\tVBIOS RIM verification successful")
            else:
                event_log.error("\t\tVBIOS RIM verification failed.")
                raise RIMVerificationFailureError("\t\tVBIOS RIM verification failed.\n\tQuitting now.")
            # Compare the runtime measurements against the golden measurements.
            verifier_obj = Verifier(attestation_report_obj, driver_rim, vbios_rim, settings=settings)
            verifier_obj.verify(settings)
            # Checking the attestation status.
            if settings.check_status():
                if not arguments_as_dictionary["user_mode"] and not arguments_as_dictionary['test_no_gpu']:
                    info_log.info("\tSetting the GPU Ready State to READY.")
                    NvmlHandler.set_gpu_ready_state(True)
                info_log.info(f'\tGPU {i} verified successfully.')
            elif arguments_as_dictionary['test_no_gpu']:
                pass
            else:
                if not NvmlHandler.is_cc_dev_mode() and not arguments_as_dictionary["user_mode"]:
                    info_log.info("\tSetting the GPU Ready State to NOT READY.")
                    NvmlHandler.set_gpu_ready_state(False)
                elif NvmlHandler.is_cc_dev_mode() and not arguments_as_dictionary["user_mode"]:
                    info_log.info("\tSetting the GPU Ready State to READY as the system is in DEV mode.")
                    NvmlHandler.set_gpu_ready_state(True)
                info_log.info(f'The verification of GPU {i} resulted in failure.')
            # Overall status is the logical AND of every GPU's own status.
            if i == 0:
                overall_status = settings.check_status()
            else:
                overall_status = overall_status and settings.check_status()
    except Exception as error:
        info_log.error(error)
        if arguments_as_dictionary['test_no_gpu']:
            # NOTE(review): this bare return is superseded by the `return`
            # inside `finally` below, so the caller still receives a tuple.
            return
        if is_non_fatal_issue(error):
            retry(error, arguments_as_dictionary["user_mode"])
        elif need_to_change_gpu_state(error):
            if not NvmlHandler.is_cc_dev_mode() and not arguments_as_dictionary["user_mode"]:
                info_log.info("\tSetting the GPU Ready State to NOT READY.")
                NvmlHandler.set_gpu_ready_state(False)
            elif NvmlHandler.is_cc_dev_mode() and not arguments_as_dictionary["user_mode"]:
                info_log.info("\tSetting the GPU Ready State to READY as the system is in DEV mode.")
                NvmlHandler.set_gpu_ready_state(True)
    finally:
        event_log.debug("-----------------------------------")
        if overall_status:
            info_log.info(f"\tGPU Attested Successfully")
        else:
            info_log.info(f"\tGPU Attestation failed")
        # check status and update the claims list in the finally block such that
        # un-checked claims will be false in case of exceptions
        if 'gpu_info_obj' in locals():
            # NOTE(review): if the failure occurred before `settings` was bound
            # (e.g. unknown architecture), this raises NameError — confirm.
            settings.check_status()
            verified_claims = settings.claims
            verified_claims['x-nv-gpu-uuid'] = gpu_info_obj.get_uuid()
        else:
            verified_claims = {}
        formatted_claims_str = json.dumps(verified_claims, indent=2)
        event_log.debug(f"\tGPU Verified claims list : {formatted_claims_str}")
        event_log.debug("-----------------------------------")
        jwt_claims = create_jwt_token(verified_claims)
        # NOTE(review): returning from `finally` suppresses any exception still
        # propagating, so the caller always gets a (status, claims) tuple —
        # confirm that swallowing fatal errors here is intentional.
        return overall_status, jwt_claims
    # NOTE(review): unreachable — the finally block above always returns first.
    event_log.debug("-----------ENDING-----------")
def create_jwt_token(gpu_claims_list: dict):
    """ Method to create a JWT token from a JSON-serializable claims object.

    Args:
        gpu_claims_list (dict): the Attestation claims to encode.

    Returns:
        The encoded JWT token that corresponds to the claims.
    """
    # NOTE(review): the token is signed with a hard-coded symmetric key
    # ('secret'), so it packages the claims but provides no cryptographic
    # authenticity — confirm this is intentional before trusting the token.
    encoded_data = jwt.encode(gpu_claims_list,
                              'secret',
                              "HS256")
    return encoded_data
def retry(error, is_user_mode):
    """ Retry the GPU attestation after certain (non-fatal) exceptions.

    Args:
        error (exceptions.Error): the exception that occurred.
        is_user_mode (bool): when the cc_admin tool runs in user mode, the GPU
            ready state is left untouched.
    """
    # Release the NVML handle before main() re-initializes it.
    NvmlHandler.close_nvml()
    if BaseSettings.is_retry_allowed():
        time.sleep(BaseSettings.MAX_TIME_DELAY)
        info_log.info("Retrying the GPU attestation.")
        main()
        return
    if need_to_change_gpu_state(error) and not is_user_mode and not NvmlHandler.is_cc_dev_mode():
        info_log.info("Setting the GPU Ready State to not Ready.")
        NvmlHandler.set_gpu_ready_state(False)
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    main()
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/cc_admin.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from verifier.config import (
BaseSettings,
info_log,
__author__,
__copyright__,
__version__,
)
from verifier.utils import is_zeros
from verifier.exceptions import InvalidMeasurementIndexError
class Verifier:
    """ A class to match the runtime GPU measurements against the golden
    measurements.
    """
    def verify(self, settings):
        """ This methods compares the runtime measurement with the golden measurement in order to check if there is any discrepancy.

        Args:
            settings (config.HopperSettings): the object containing the various config info.

        Returns:
            [bool]: returns True if all the valid golden measurements values matches with the
                    corresponding runtime measurements. Otherwise, returns False.
        """
        info_log.info("\tComparing measurements (runtime vs golden)")
        if len(self.runtime_measurements) == 0:
            info_log.warning("\t\t\tWarning : no measurements from attestation report received.")
        if len(self.golden_measurements) == 0:
            info_log.warning("\t\t\tWarning : no golden measurements from RIMs received.")
        list_of_mismatched_indexes = list()
        for i in self.golden_measurements:
            # Index 35 tracks the NVDEC0 engine and is skipped when NVDEC is
            # disabled (see __init__).
            if i == 35 and not self.is_msr_35_valid:
                continue
            is_matching = False
            # A golden measurement can carry several alternative values; the
            # runtime value must match one of them in both content and size
            # (runtime value is a hex string, hence the // 2 byte length).
            # NOTE(review): self.runtime_measurements[i] raises KeyError when
            # the report lacks a measurement at a golden index — confirm this
            # cannot happen for well-formed reports.
            for j in range(self.golden_measurements[i].get_number_of_alternatives()):
                if self.golden_measurements[i].get_value_at_index(j) == self.runtime_measurements[i] and \
                    self.golden_measurements[i].get_size() == len(self.runtime_measurements[i]) // 2:
                    is_matching = True
            if not is_matching:
                # Measurements are not matching.
                list_of_mismatched_indexes.append(i)
        if len(list_of_mismatched_indexes) > 0:
            info_log.info("""\t\t\tThe runtime measurements are not matching with the
golden measurements at the following indexes(starting from 0) :\n\t\t\t[""")
            list_of_mismatched_indexes.sort()
            for i, index in enumerate(list_of_mismatched_indexes):
                if i != len(list_of_mismatched_indexes) - 1:
                    info_log.info(f'\t\t\t{index}, ')
                else:
                    info_log.info("\t\t\t"+str(index))
            info_log.info("\t\t\t]")
            return False
        else:
            info_log.info("\t\t\tThe runtime measurements are matching with the golden measurements.\
\n\t\tGPU is in expected state.")
            settings.mark_measurements_as_matching()
            return True

    def generate_golden_measurement_list(self, driver_golden_measurements, vbios_golden_measurements, settings):
        """ This method takes the driver and vbios golden measurements and
        combines them into a single dictionary with the measurement index as
        the key and the golden measurement object as the value.

        Args:
            driver_golden_measurements (dict): the dictionary containing the driver golden measurements.
            vbios_golden_measurements (dict): the dictionary containing the vbios golden measurements.
            settings (config.HopperSettings): the object containing the various config info.

        Raises:
            InvalidMeasurementIndexError: it is raised in case both the driver and vbios RIM file have
                                          active measurement at the same index.
        """
        self.golden_measurements = dict()
        # Take every active driver measurement first ...
        for gld_msr_idx in driver_golden_measurements:
            if driver_golden_measurements[gld_msr_idx].is_active():
                self.golden_measurements[gld_msr_idx] = driver_golden_measurements[gld_msr_idx]
        # ... then the active vbios measurements, rejecting index collisions.
        for gld_msr_idx in vbios_golden_measurements:
            if vbios_golden_measurements[gld_msr_idx].is_active() and \
                gld_msr_idx in self.golden_measurements:
                raise InvalidMeasurementIndexError(f"The driver and vbios RIM have measurement at the same index : {gld_msr_idx}")
            elif vbios_golden_measurements[gld_msr_idx].is_active():
                self.golden_measurements[gld_msr_idx] = vbios_golden_measurements[gld_msr_idx]
        settings.mark_no_driver_vbios_measurement_index_conflict()

    def __init__(self, attestation_report_obj, driver_rim_obj, vbios_rim_obj, settings):
        """ The constructor method for the Verifier class.

        Args:
            attestation_report_obj (AttestationReport): the attestation report.
            driver_rim_obj (rim.RIM): the driver RIM object containing the driver golden measurements.
            vbios_rim_obj (rim.RIM): the vbios RIM object containing the vbios golden measurement.
            settings (config.HopperSettings): the object containing the various config info.
        """
        # Measurement index 35 is only meaningful when the NVDEC0 engine is
        # enabled; it is skipped during comparison otherwise (see verify()).
        self.is_msr_35_valid = True
        if attestation_report_obj.get_response_message().get_opaque_data().get_data("OPAQUE_FIELD_ID_NVDEC0_STATUS") == BaseSettings.NVDEC_STATUS.DISABLED:
            self.is_msr_35_valid = False
        self.generate_golden_measurement_list(driver_rim_obj.get_measurements(), vbios_rim_obj.get_measurements(), settings)
        self.runtime_measurements = attestation_report_obj.get_measurements()
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/verifier.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import io
from signxml import XMLVerifier
from signxml.exceptions import InvalidSignature
from lxml import etree
from OpenSSL import crypto
from .golden_measurement import GoldenMeasurement
from verifier.config import (
BaseSettings,
event_log,
info_log,
__author__,
__copyright__,
__version__,
)
from verifier.cc_admin_utils import CcAdminUtils
from verifier.exceptions import (
ElementNotFoundError,
EmptyElementError,
InvalidCertificateError,
NoRIMMeasurementsError,
RIMSchemaValidationError,
RIMFetchError,
RIMSignatureVerificationError,
InvalidMeasurementIndexError,
InvalidRIMNameError,
RIMCertChainVerificationError,
RIMCertChainOCSPVerificationError,
)
class RIM:
""" A class to process and manage all the processing of the RIM files.
RIM module Trusted Computing Group Reference Integrity Manifest of the
Verifier is used to perform the authentication and access of the golden
measurements.
"""
@staticmethod
def get_element(parent_element, name_of_element):
    """ A static method that gives the child element of the parent_element with the given name.

    Args:
        parent_element (lxml.etree._Element): the parent of the required element.
        name_of_element (str): the name of the required element.

    Returns:
        [lxml.etree._Element]: the first matching child element, or None if no
                               child tag contains the given name.
    """
    assert isinstance(parent_element, etree._Element)
    assert type(name_of_element) is str
    # Iterate the element directly: getchildren() is deprecated in lxml and
    # was removed from the stdlib ElementTree API.
    for child in parent_element:
        if (child.tag).find(name_of_element) != -1:
            return child
    return None
@staticmethod
def get_all_elements(parent_element, name_of_element):
    """ A static method that gives all child elements of the parent_element whose
    tag contains the given name.

    Args:
        parent_element (lxml.etree._Element): the parent of the required elements.
        name_of_element (str): the name of the required elements.

    Returns:
        [list]: the list of matching child elements (possibly empty).
    """
    assert isinstance(parent_element, etree._Element)
    assert type(name_of_element) is str
    # Direct iteration replaces the deprecated getchildren() call.
    return [child for child in parent_element
            if (child.tag).find(name_of_element) != -1]
@staticmethod
def read(base_RIM_path):
    """ Static method that reads the signed base RIM from the disk.

    Argument:
        base_RIM_path (str) : the path to the signed base RIM.

    Raises:
        RIMFetchError : raised when the RIM file cannot be read from disk.

    Returns:
        root (lxml.etree._Element) : the root element of the base RIM.
    """
    try:
        assert type(base_RIM_path) is str
        with open(base_RIM_path, 'rb') as rim_file:
            raw_content = rim_file.read()
    except OSError:
        event_log.error(f'Unable to read {base_RIM_path} \nPlease provide a valid RIM file.')
        raise RIMFetchError(f'Unable to read {base_RIM_path} \nPlease provide a valid RIM file.')
    # Parse from an in-memory stream with entity resolution disabled
    # (guards against XML external-entity expansion).
    xml_parser = etree.XMLParser(resolve_entities=False)
    document_tree = etree.parse(io.BytesIO(raw_content), xml_parser)
    return document_tree.getroot()
def validate_schema(self, schema_path):
    """ Performs the schema validation of the base RIM against a given schema.

    Args:
        schema_path (str): the path to the swidtag schema xsd file.

    Raises:
        RIMSchemaValidationError: raised if the schema cannot be parsed or compiled.

    Returns:
        [bool]: True if the schema validation is successful otherwise, returns False.
    """
    try:
        # resolve_entities=False guards against XML external-entity expansion.
        parser = etree.XMLParser(resolve_entities=False)
        xml_schema_document = etree.parse(schema_path, parser)
        xml_schema = etree.XMLSchema(xml_schema_document)
        # validate() itself returns a bool and does not raise on mismatch.
        result = xml_schema.validate(self.root)
    except Exception:
        err_msg = "\t\tRIM Schema validation failed."
        event_log.error(err_msg)
        raise RIMSchemaValidationError(err_msg)
    return result
def get_colloquial_version(self):
    """ Parses RIM to return the driver/vbios version which is present in the RIM as
    colloquial version.

    Raises:
        ElementNotFoundError: Raises exception if the Meta element is not present.
        EmptyElementError: Raises exception if the colloquialVersion attribute is
                           missing or empty.

    Returns:
        [str]: The lower-cased colloquialVersion attribute of the Meta element.
    """
    Meta = RIM.get_element(self.root, "Meta")
    if Meta is None:
        err_msg = "\t\tNo Meta element found in the RIM."
        info_log.error(err_msg)
        raise ElementNotFoundError(err_msg)
    # Use .get() so a missing attribute yields None and is reported through
    # EmptyElementError below instead of escaping as a raw KeyError.
    version = Meta.attrib.get('colloquialVersion')
    if version is None or version == '':
        err_msg = "Driver version not found in the RIM."
        info_log.error(err_msg)
        raise EmptyElementError(err_msg)
    event_log.debug(f'The driver version in the RIM file is {version}')
    version = version.lower()
    return version
def extract_certificates(self):
    """ Extracts all the x509 certificate in PEM format from the base RIM.

    Raises:
        ElementNotFoundError: it is raised if the required element is not present.
        InvalidCertificateError: it is raised if there is any problem in
                                 extracting the X509 certificate from the RIM file.

    Returns:
        [list]: the list of crypto.X509 certificate objects.
    """
    try:
        Signature = RIM.get_element(self.root, "Signature")
        if Signature is None:
            err_msg = "No Signature found in the RIM."
            info_log.error(err_msg)
            raise ElementNotFoundError(err_msg)
        KeyInfo = RIM.get_element(Signature, "KeyInfo")
        if KeyInfo is None:
            # Fixed typo in the error message ("KeyInfor" -> "KeyInfo").
            err_msg = "No KeyInfo found in the RIM."
            info_log.error(err_msg)
            raise ElementNotFoundError(err_msg)
        X509Data = RIM.get_element(KeyInfo, "X509Data")
        if X509Data is None:
            err_msg = "X509Data not found in the RIM."
            info_log.error(err_msg)
            raise ElementNotFoundError(err_msg)
        X509Certificates = RIM.get_all_elements(X509Data, "X509Certificate")
        if len(X509Certificates) == 0:
            err_msg = "X509Certificates not found in the RIM."
            info_log.error(err_msg)
            raise ElementNotFoundError(err_msg)
        result = list()
        # NOTE(review): the last certificate in the list is deliberately not
        # extracted here (range stops at len - 1) — presumably the root, which
        # is taken from the trusted local store instead; confirm.
        for i in range(len(X509Certificates) - 1):
            header = "-----BEGIN CERTIFICATE-----\n"
            cert_string = X509Certificates[i].text
            # The base64 body may contain layout spaces; strip them so the
            # PEM parses cleanly.
            cert_string = cert_string.replace(' ','')
            tail = "-----END CERTIFICATE-----\n"
            final = header + cert_string + tail
            cert_bytes = final.encode()
            x509_cert_object = crypto.load_certificate(type=crypto.FILETYPE_PEM, buffer=cert_bytes)
            if not isinstance(x509_cert_object, crypto.X509):
                raise ValueError()
            result.append(x509_cert_object)
    except Exception as error:
        info_log.error(error)
        err_msg = "\t\tThere was a problem while extracting the X509 certificate from the RIM."
        info_log.info(err_msg)
        raise InvalidCertificateError(err_msg)
    return result
def verify_signature(self, settings):
    """ Verifies the XML signature of the base RIM against the trusted RIM root
    certificate.

    Arguments:
        settings (config.HopperSettings): the object containing the various config info.

    Raises:
        RIMSignatureVerificationError: raised on any signature verification failure.

    Returns:
        [bool] : True if signature verification is successful; failure never
                 returns False — it always raises RIMSignatureVerificationError.
    """
    # Reaching here means the certificates were extracted from the RIM, so
    # record that milestone for the claims list.
    if self.rim_name == 'driver':
        settings.mark_driver_rim_cert_extracted_successfully()
    else:
        settings.mark_vbios_rim_cert_extracted_successfully()
    try:
        # performs the signature verification of the RIM. We will get the root of the RIM
        # if the signature verification is successful otherwise, it raises InvalidSignature exception.
        verified_root = XMLVerifier().verify(self.root, ca_pem_file = settings.RIM_ROOT_CERT, ca_path = settings.RIM_ROOT_CERT_DIR).signed_xml
        if verified_root is None:
            err_msg = "\t\t\tRIM signature verification failed."
            event_log.error(err_msg)
            raise RIMSignatureVerificationError(err_msg)
    except InvalidSignature as error:
        err_msg = "\t\t\tRIM signature verification failed."
        event_log.error(err_msg)
        raise RIMSignatureVerificationError(err_msg)
    except Exception as error:
        # Any other failure (bad PEM, I/O, etc.) is reported the same way,
        # with the underlying error logged first.
        info_log.error(error)
        err_msg = "\t\t\tRIM signature verification failed."
        event_log.error(err_msg)
        raise RIMSignatureVerificationError(err_msg)
    info_log.info(f"\t\t\t{self.rim_name} RIM signature verification successful.")
    # All later parsing happens on the verified (signed) subtree only.
    self.root = verified_root
    return True
def get_measurements(self):
""" Returns the dictionary object that contains the golden measurement.
Returns:
[dict]: the dictionary containing the golden measurement.
"""
return self.measurements_obj
def parse_measurements(self, settings):
    """ Lists the measurements of the Resource tags in the base RIM's Payload
    element into self.measurements_obj.

    Args:
        settings (config.HopperSettings): the object containing the various config info.

    Raises:
        ElementNotFoundError: it is raised if a required element is not found.
        InvalidMeasurementIndexError: it is raised in case multiple measurement are assigned same index.
        NoRIMMeasurementsError: it is raised in case there are no golden measurements in the RIM file.
    """
    self.measurements_obj = dict()
    Payload = RIM.get_element(self.root, "Payload")
    if Payload is None:
        err_msg = "Payload not found in the RIM."
        info_log.error(err_msg)
        raise ElementNotFoundError(err_msg)
    for child in Payload:
        # 'active' is a string attribute; anything other than the literal
        # 'False' is treated as active.
        if child.attrib['active'] == 'False':
            active = False
        else:
            active = True
        index = int(child.attrib['index'])
        alternatives = int(child.attrib['alternatives'])
        # Each measurement may list several acceptable values, stored as
        # attributes named <HashFunctionNamespace>Hash0, Hash1, ...
        measurements_values = list()
        for i in range(alternatives):
            measurements_values.append(child.attrib[settings.HashFunctionNamespace + 'Hash' + str(i)])
        golden_measurement = GoldenMeasurement(component = self.rim_name,
                                               values = measurements_values,
                                               name = child.attrib['name'],
                                               index = index,
                                               size = int(child.attrib['size']),
                                               alternatives = alternatives,
                                               active = active)
        if index in self.measurements_obj:
            raise InvalidMeasurementIndexError(f"Multiple measurement are assigned same index in {self.rim_name} rim.")
        self.measurements_obj[index] = golden_measurement
    if len(self.measurements_obj) == 0:
        raise NoRIMMeasurementsError(f"\tNo golden measurements found in {self.rim_name} rim.\n\tQuitting now.")
    event_log.debug(f"{self.rim_name} golden measurements are : \n\t\t\t\t\t\t\t")
    for idx in self.measurements_obj:
        event_log.debug(f"\n\t\t\t\t\t\t\t index : {idx}")
        event_log.debug(f"\t\t\t\t\t\t\t number of alternative values : {self.measurements_obj[idx].get_number_of_alternatives()}")
        for i in range(self.measurements_obj[idx].get_number_of_alternatives()):
            event_log.debug(f"\t\t\t\t\t\t\t\t value {i + 1} : {self.measurements_obj[idx].get_value_at_index(i)}")
    # Record for the claims list that this RIM's measurements are available.
    if self.rim_name == 'driver':
        settings.mark_rim_driver_measurements_as_available()
    else:
        settings.mark_rim_vbios_measurements_as_available()
def verify(self, version, settings, schema_path = ''):
    """ Performs the schema validation of the RIM file and, if it passes,
    verifies the RIM certificate chain (chain build plus OCSP revocation
    check) and finally the RIM signature.

    Arguments:
        version (str) : the driver/vbios version of the required RIM.
        settings (config.HopperSettings): the object containing the various config info.
        schema_path (str) : the path to the swidtag schema xsd file. Defaults to the
                            'swidSchema2015.xsd' file bundled next to this module
                            when left empty.

    Returns :
        [bool] : True if schema validation, certificate chain verification and
                 signature verification all pass.

    Raises:
        FileNotFoundError: if the swid schema file cannot be located.
        RIMSchemaValidationError: if the RIM fails schema validation.
        RIMCertChainVerificationError: if the RIM certificate chain cannot be verified.
        RIMCertChainOCSPVerificationError: if the OCSP revocation check fails.
    """
    assert type(version) is str
    assert type(schema_path) is str
    # Fall back to the schema file shipped alongside this module.
    if schema_path == "":
        schema_path = os.path.join(os.path.dirname(__file__), 'swidSchema2015.xsd')
    if not schema_path or not os.path.isfile(schema_path):
        info_log.error("There is a problem in the path to the swid schema. Please provide a valid the path to the swid schema.")
        raise FileNotFoundError("\t\tSWID schema file not found.")
    if self.validate_schema(schema_path = schema_path):
        info_log.info("\t\t\tRIM Schema validation passed.")
        if self.rim_name == 'driver':
            settings.mark_driver_rim_schema_validated()
        else:
            settings.mark_vbios_rim_schema_validated()
        # A version mismatch only produces a warning; it does not fail verification.
        if version != self.colloquialVersion:
            info_log.warning(f"\t\t\tThe {self.rim_name} version in the RIM file is not matching with the installed {self.rim_name} version.")
        else:
            if self.rim_name == 'driver':
                settings.mark_rim_driver_version_as_matching()
            else:
                settings.mark_rim_vbios_version_as_matching()
            event_log.debug(f"The {self.rim_name} version in the RIM file is matching with the installed {self.rim_name} version.")
        rim_cert_chain = self.extract_certificates()
        # Reading the RIM root certificate.
        with open(os.path.join(settings.RIM_ROOT_CERT_DIR, settings.RIM_ROOT_CERT), 'r') as root_cert_file:
            root_cert_data = root_cert_file.read()
        if self.rim_name == 'driver':
            mode = BaseSettings.Certificate_Chain_Verification_Mode.DRIVER_RIM_CERT
        else:
            mode = BaseSettings.Certificate_Chain_Verification_Mode.VBIOS_RIM_CERT
        # Append the trusted root certificate so the chain verifies up to it.
        rim_cert_chain.append(crypto.load_certificate(type = crypto.FILETYPE_PEM, buffer = root_cert_data))
        rim_cert_chain_verification_status = CcAdminUtils.verify_certificate_chain(rim_cert_chain,
                                                                                   settings,
                                                                                   mode)
        if not rim_cert_chain_verification_status:
            raise RIMCertChainVerificationError(f"\t\t\t{self.rim_name} RIM cert chain verification failed")
        info_log.info(f"\t\t\t{self.rim_name} RIM certificate chain verification successful.")
        rim_cert_chain_ocsp_revocation_status = CcAdminUtils.ocsp_certificate_chain_validation(rim_cert_chain, settings, mode)
        if not rim_cert_chain_ocsp_revocation_status:
            raise RIMCertChainOCSPVerificationError(f"{self.rim_name} RIM cert chain ocsp status verification failed.")
        return self.verify_signature(settings)
    else:
        raise RIMSchemaValidationError(f"\t\t\tSchema validation of {self.rim_name} RIM failed.")
def __init__(self, rim_path, rim_name, settings):
    """ Builds a RIM object by reading and parsing the RIM file at the
    given path, recording fetch progress on the settings object.

    Args:
        rim_path (str): the path to the RIM file.
        rim_name (str): the name of the RIM, either "driver" or "vbios".
        settings (config.HopperSettings): the object containing various config.

    Raises:
        InvalidRIMNameError: raised when rim_name is neither "driver" nor "vbios".
    """
    assert type(rim_path) is str
    assert type(rim_name) is str
    if rim_name not in ('driver', 'vbios'):
        raise InvalidRIMNameError(f"Invalid rim name '{rim_name}' provided, valid names can be 'driver'/'vbios'.")
    self.rim_name = rim_name
    self.root = RIM.read(rim_path)
    # Record on the settings object that this RIM has been fetched.
    mark_fetched = (settings.mark_driver_rim_fetched
                    if rim_name == 'driver'
                    else settings.mark_vbios_rim_fetched)
    mark_fetched()
    self.colloquialVersion = self.get_colloquial_version()
    self.parse_measurements(settings)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/rim/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from verifier.config import info_log
class GoldenMeasurement:
    """ A class to represent the individual golden measurement values from the RIM files.

    Instance attributes (all set via the corresponding setters in __init__):
        component    : "driver" or "vbios" — which RIM the measurement belongs to.
        values       : list of valid/alternative golden measurement values.
        name         : name of the measurement.
        index        : index of the measurement.
        size         : size of the measurement value in number of bytes.
        alternatives : number of valid/alternative measurement values.
        active       : True when the measurement takes part in the runtime comparison.
    """

    def __init__(self, component, values, name, index, size, alternatives, active):
        """ Constructor method to create the object for the individual golden measurement.

        Args:
            component (str): the component name for which the golden
                             measurement belongs "driver" or "vbios".
            values (list): the list of valid/alternative golden measurement.
            name (str): the name of the measurement.
            index (int): the index of the measurement.
            size (int): the size of the measurement value in number of bytes.
            alternatives (int): number of valid/alternative measurements.
            active (bool): True if the measurement is to be used for
                           comparison with the runtime measurement else False.
        """
        self.set_component(component)
        self.set_value(values)
        self.set_name(name)
        self.set_index(index)
        self.set_size(size)
        self.set_number_of_alternatives(alternatives)
        self.set_active(active)

    def get_component(self):
        """ Fetches the component name for which the measurement belongs,
        either "driver" or "vbios".

        Returns:
            [str]: the component name, one of the value "driver"/"vbios".
        """
        return self.component

    def set_component(self, component):
        """ Sets the component name to the golden measurement. It can be either
        "driver" or "vbios".

        Args:
            component (str): component name. It can be either "driver" for
                             driver measurement or "vbios" for vbios measurement.
        """
        self.component = component

    def get_value_at_index(self, index):
        """ Fetches the golden measurement value at the given index among the
        alternative values of the golden measurement at a particular
        measurement index.

        Args:
            index (int): the position of the value in the list of alternative
                         measurement values.

        Returns:
            [str]: the measurement value.
        """
        assert type(index) is int
        return self.values[index]

    def set_value(self, values):
        """ Sets the list of measurement values to the GoldenMeasurement class
        object.

        Args:
            values (list): the list of valid golden measurement at an index.
        """
        assert type(values) is list
        self.values = values

    def get_name(self):
        """ Fetches the name of the golden measurement.

        Returns:
            [str]: the name of the golden measurement.
        """
        return self.name

    def set_name(self, name):
        """ Sets the name of measurement values to the GoldenMeasurement class
        object.

        Args:
            name (str): the name of the golden measurement.
        """
        self.name = name

    def get_index(self):
        """ Fetches the index of the golden measurement.

        Returns:
            [int]: the index of the golden measurement.
        """
        return self.index

    def set_index(self, index):
        """ Sets the index of the golden measurement.

        Args:
            index (int): the index of the golden measurement.
        """
        self.index = index

    def get_size(self):
        """ Fetches the size of the golden measurement value in number of bytes.

        Returns:
            [int]: the size of the measurement.
        """
        return self.size

    def set_size(self, size):
        """ Sets the size of the golden measurement value in number of bytes.

        Args:
            size (int): the size of the measurement.
        """
        self.size = size

    def get_number_of_alternatives(self):
        """ Fetches the number of valid alternative values for the golden
        measurement.

        Returns:
            [int]: the number of valid values.
        """
        return self.alternatives

    def set_number_of_alternatives(self, value):
        """ Sets the number of valid alternative values for the golden
        measurement.

        Args:
            value (int): the number of valid values.
        """
        self.alternatives = value

    def is_active(self):
        """ Checks if the given golden measurement needs to be compared with
        the corresponding run time measurement.

        Returns:
            [bool]: True if being used for comparison with runtime
                    measurement otherwise returns False.
        """
        return self.active

    def set_active(self, active):
        """ Sets whether the given golden measurement needs to be compared with
        the corresponding run time measurement or not.

        Args:
            active (bool): True if being used for comparison with runtime
                           measurement otherwise False.
        """
        self.active = active

    def print_obj(self, logger):
        """ This method prints the various fields of the GoldenMeasurement
        class object representing the individual golden measurement.

        Args:
            logger (logging.Logger): the logger object.
        """
        logger.info('-----------------------------------')
        logger.info(f"\tcomponent : {self.component}")
        # BUGFIX: previously logged self.value / self.nullable, neither of which
        # exists on this class (the constructor sets 'values' and 'alternatives'),
        # so this method raised AttributeError whenever it was called.
        logger.info(f"\tvalues : {self.values}")
        logger.info(f"\tname : {self.name}")
        logger.info(f"\tindex : {self.index}")
        logger.info(f"\tsize : {self.size}")
        logger.info(f"\talternatives : {self.alternatives}")
        logger.info(f"\tactive : {self.active}")
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/rim/golden_measurement.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""A module to handle all the nvml api calls for the verifier.
"""
from pynvml import (
nvmlInit,
nvmlDeviceGetArchitecture,
nvmlDeviceGetBoardId,
nvmlDeviceGetCount,
nvmlDeviceGetHandleByIndex,
nvmlDeviceGetUUID,
nvmlDeviceGetVbiosVersion,
nvmlShutdown,
nvmlSystemGetDriverVersion,
nvmlDeviceGetConfComputeGpuAttestationReport,
nvmlSystemSetConfComputeGpusReadyState,
nvmlSystemGetConfComputeGpusReadyState,
nvmlSystemGetConfComputeState,
NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE,
NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE,
NVML_ERROR_UNINITIALIZED,
NVML_ERROR_DRIVER_NOT_LOADED,
NVML_ERROR_TIMEOUT,
NVML_ERROR_RESET_REQUIRED,
NVML_ERROR_IN_USE,
NVML_ERROR_MEMORY,
NVML_ERROR_NO_DATA,
NVML_ERROR_INSUFFICIENT_RESOURCES,
)
from verifier.utils import (
get_gpu_architecture_value,
function_wrapper_with_timeout,
)
from verifier.config import (
BaseSettings,
info_log,
event_log,
__author__,
__copyright__,
__version__,
)
from verifier.nvml.gpu_cert_chains import GpuCertificateChains
from verifier.nvml.nvmlHandlerTest import NvmlHandlerTest
from verifier.exceptions import (
AttestationReportFetchError,
TimeoutError,
)
class NvmlHandler:
    """ Class to handle all the pynvml api calls and fetching the GPU information.
    """
    # Class-level cache of GPU device handles, populated by get_number_of_gpus()
    # and refreshed per device by init_handle().
    Handles = None
    # NVML error codes that represent transient/temporary conditions.
    # NOTE(review): this list is not referenced anywhere inside this class —
    # presumably consumed by callers that retry on temporary failures; verify
    # before removing.
    TEMPORARY_ISSUE = [
        NVML_ERROR_UNINITIALIZED,
        NVML_ERROR_DRIVER_NOT_LOADED,
        NVML_ERROR_TIMEOUT,
        NVML_ERROR_RESET_REQUIRED,
        NVML_ERROR_IN_USE,
        NVML_ERROR_MEMORY,
        NVML_ERROR_NO_DATA,
        NVML_ERROR_INSUFFICIENT_RESOURCES,
    ]

    @classmethod
    def get_number_of_gpus(cls):
        """ A class method to get the number of available gpus and create a
        list of GPU device handles for the available GPUs.

        Side effect: rebuilds cls.Handles with one handle per detected GPU.

        Returns:
            [int]: number of available GPUs.
        """
        number_of_gpus = function_wrapper_with_timeout([nvmlDeviceGetCount,
                                                        "nvmlDeviceGetCount"],
                                                       BaseSettings.MAX_NVML_TIME_DELAY)
        cls.Handles = list()
        for i in range(number_of_gpus):
            cls.Handles.append(function_wrapper_with_timeout([nvmlDeviceGetHandleByIndex,
                                                              i,
                                                              "nvmlDeviceGetHandleByIndex"],
                                                             BaseSettings.MAX_NVML_TIME_DELAY))
        return number_of_gpus

    @staticmethod
    def close_nvml():
        """ Static method to close the pynvml library.
        """
        function_wrapper_with_timeout([nvmlShutdown, "nvmlShutdown"], BaseSettings.MAX_NVML_TIME_DELAY)

    @staticmethod
    def init_nvml():
        """ Static method to initialize the pynvml library.
        """
        function_wrapper_with_timeout([nvmlInit, "nvmlInit"], BaseSettings.MAX_NVML_TIME_DELAY)

    @staticmethod
    def set_gpu_ready_state(state):
        """ Static method to set GPU state as ready if the input is True otherwise set as not ready to accept workload.

        Args:
            state (bool): True to mark the GPUs ready to accept client
                          requests, False otherwise.
        """
        assert type(state) is bool
        if state:
            ready_state = NVML_CC_ACCEPTING_CLIENT_REQUESTS_TRUE
        else:
            ready_state = NVML_CC_ACCEPTING_CLIENT_REQUESTS_FALSE
        function_wrapper_with_timeout([nvmlSystemSetConfComputeGpusReadyState,
                                       ready_state,
                                       "nvmlSystemSetConfComputeGpusReadyState"],
                                      BaseSettings.MAX_NVML_TIME_DELAY)

    @staticmethod
    def is_cc_enabled():
        """ Static method to check if the confidential compute feature is enabled or not.

        Returns:
            [bool]: returns True if the cc feature is enabled in driver, otherwise
                    returns False.
        """
        state = function_wrapper_with_timeout([nvmlSystemGetConfComputeState,
                                               "nvmlSystemGetConfComputeState"], BaseSettings.MAX_NVML_TIME_DELAY)
        return state.ccFeature != 0

    @staticmethod
    def is_cc_dev_mode():
        """ Static method to check if the driver is in "CC DEV" mode or not.

        Returns:
            [bool]: returns True if the driver is in CC DEV mode, otherwise
                    returns False.
        """
        state = function_wrapper_with_timeout([nvmlSystemGetConfComputeState,
                                               "nvmlSystemGetConfComputeState"], BaseSettings.MAX_NVML_TIME_DELAY)
        return state.devToolsMode != 0

    @staticmethod
    def get_gpu_ready_state():
        """ Static method to check the GPU state.

        Returns:
            [int]: returns 0 for not ready 1 for ready state.
        """
        state = function_wrapper_with_timeout([nvmlSystemGetConfComputeGpusReadyState,
                                               "nvmlSystemGetConfComputeGpusReadyState"],
                                              BaseSettings.MAX_NVML_TIME_DELAY)
        return state

    def fetch_attestation_report(self, index, nonce):
        """ Fetches the attestation report of the GPU.

        Args:
            index (int): index of the GPU.
            nonce (bytes): the nonce.

        Raises:
            AttestationReportFetchError: it is raised if the attestation report
                                         could not be fetched.

        Returns:
            [bytes]: the raw attestation report data.
        """
        try:
            attestation_report_struct = function_wrapper_with_timeout([nvmlDeviceGetConfComputeGpuAttestationReport,
                                                                       self.Handles[index],
                                                                       nonce,
                                                                       "nvmlDeviceGetConfComputeGpuAttestationReport"],
                                                                      BaseSettings.MAX_NVML_TIME_DELAY)
            length_of_attestation_report = attestation_report_struct.attestationReportSize
            attestation_report = attestation_report_struct.attestationReport
            # Copy exactly attestationReportSize bytes out of the nvml buffer.
            attestation_report_data = list()
            for i in range(length_of_attestation_report):
                attestation_report_data.append(attestation_report[i])
            bin_attestation_report_data = bytes(attestation_report_data)
            BaseSettings.mark_attestation_report_as_available()
            return bin_attestation_report_data
        # NOTE(review): this TimeoutError is verifier.exceptions.TimeoutError
        # (imported at file top), which shadows the builtin of the same name.
        except TimeoutError as err:
            raise AttestationReportFetchError("\tThe call to fetch attestation report timed out.")
        except Exception as err:
            info_log.error(err)
            err_msg = "\tSomething went wrong while fetching the attestation report from the gpu."
            event_log.error(err_msg)
            raise AttestationReportFetchError(err_msg)

    def get_driver_version(self):
        """ Fetches the DriverVersion field of the NvmlHandler class object.

        Returns:
            [str]: the driver version.
        """
        return self.DriverVersion

    def get_uuid(self):
        """ Fetches the UUID field of the NvmlHandler class object.

        Returns:
            [str]: the UUID
        """
        return self.UUID

    def get_vbios_version(self):
        """ Fetches the VbiosVersion field of the NvmlHandler class object.

        Returns:
            [str]: the vbios version
        """
        return self.VbiosVersion

    def get_attestation_cert_chain(self):
        """ Fetches the GPU attestation certificate chain from the
        GpuCertificateChains class object.

        Returns:
            [list]: the list of x509 certificates of the certificate chain.
        """
        return self.CertificateChains.GpuAttestationCertificateChain

    def get_attestation_report(self):
        """ Fetches the attestation report data of the NvmlHandler class object.

        Returns:
            [bytes]: the attestation report data.
        """
        return self.AttestationReport

    def get_gpu_architecture(self):
        """ Fetches the name of the current GPU architecture.

        Returns:
            [str]: the GPU architecture.
        """
        return get_gpu_architecture_value(self.GPUArchitecture)

    def init_handle(self):
        """ Fetches the GPU handle for the current GPU index value.
        """
        self.Handles[self.Index] = function_wrapper_with_timeout([nvmlDeviceGetHandleByIndex,
                                                                  self.Index,
                                                                  "nvmlDeviceGetHandleByIndex"],
                                                                 BaseSettings.MAX_NVML_TIME_DELAY)

    def init_driver_version(self):
        """ Fetches and assigns the Driver Version from the driver via pynvml
        api.
        """
        self.DriverVersion = function_wrapper_with_timeout([nvmlSystemGetDriverVersion,
                                                            "nvmlSystemGetDriverVersion"],
                                                           BaseSettings.MAX_NVML_TIME_DELAY)

    def init_board_id(self):
        """ Fetches and assigns the BoardId from the driver via pynvml api.
        """
        self.BoardId = function_wrapper_with_timeout([nvmlDeviceGetBoardId,
                                                      self.Handles[self.Index],
                                                      "nvmlDeviceGetBoardId"],
                                                     BaseSettings.MAX_NVML_TIME_DELAY)

    def init_uuid(self):
        """ Fetches and assigns the UUID of the GPU to the UUID field.
        """
        self.UUID = function_wrapper_with_timeout([nvmlDeviceGetUUID,
                                                   self.Handles[self.Index],
                                                   "nvmlDeviceGetUUID"],
                                                  BaseSettings.MAX_NVML_TIME_DELAY)

    def init_gpu_architecture(self):
        """ Fetches and assigns the GPU device architecture field.
        """
        self.GPUArchitecture = function_wrapper_with_timeout([nvmlDeviceGetArchitecture,
                                                              self.Handles[self.Index],
                                                              "nvmlDeviceGetArchitecture"],
                                                             BaseSettings.MAX_NVML_TIME_DELAY)

    def init_vbios_version(self):
        """ Fetches and assigns the VbiosVersion field via pynvml api.
        """
        self.VbiosVersion = function_wrapper_with_timeout([nvmlDeviceGetVbiosVersion,
                                                           self.Handles[self.Index],
                                                           "nvmlDeviceGetVbiosVersion"],
                                                          BaseSettings.MAX_NVML_TIME_DELAY)

    def __init__(self, index, nonce, settings):
        """ Constructor method for the NvmlHandler class that initializes the
        various field values.

        Args:
            index (int): the index of the NvmlHandler class object.
            nonce (bytes): the nonce for the attestation report.
            settings (config.HopperSettings): the object containing the various config info.
        """
        assert type(index) is int
        assert type(nonce) is bytes and len(nonce) == BaseSettings.SIZE_OF_NONCE_IN_BYTES
        self.Index = index
        self.init_handle()
        self.init_driver_version()
        self.init_board_id()
        self.init_uuid()
        self.init_gpu_architecture()
        self.init_vbios_version()
        self.CertificateChains = GpuCertificateChains(self.Handles[index])
        self.AttestationReport = self.fetch_attestation_report(index, nonce)
        # NOTE(review): fetch_attestation_report already marks availability on
        # BaseSettings; this additionally marks the per-run settings object.
        settings.mark_attestation_report_as_available()
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from OpenSSL import crypto
import re
from pynvml import nvmlDeviceGetConfComputeGpuCertificate
from verifier.config import (
BaseSettings,
info_log,
event_log,
)
from verifier.exceptions import (
CertExtractionError,
CertChainFetchError,
)
from .test_handle import TestHandle
from verifier.utils import function_wrapper_with_timeout
class GpuCertificateChains:
    """ Handles fetching the GPU attestation certificate chain from the GPU
    via the nvml api and splitting it into individual certificate objects.
    """

    @classmethod
    def get_gpu_certificate_chains(cls, handle):
        """ Fetches the raw GPU attestation certificate chain (PEM format)
        for the given GPU handle.

        Args:
            handle (pynvml.nvml.LP_struct_c_nvmlDevice_t): handle of the GPU.

        Raises:
            CertChainFetchError: raises exception if there is any problem while fetching the certificate chains.

        Returns:
            [bytes]: attestation certificate chain data.
        """
        try:
            cert_struct = function_wrapper_with_timeout([nvmlDeviceGetConfComputeGpuCertificate,
                                                         handle,
                                                         "nvmlDeviceGetConfComputeGpuCertificate"],
                                                        BaseSettings.MAX_NVML_TIME_DELAY)
            # Copy exactly attestationCertChainSize bytes out of the nvml struct.
            chain_size = cert_struct.attestationCertChainSize
            chain_buffer = cert_struct.attestationCertChain
            return bytes(chain_buffer[i] for i in range(chain_size))
        except Exception as err:
            info_log.error(err)
            err_msg = "\tSomething went wrong while fetching the certificate chains from the gpu."
            event_log.error(err_msg)
            raise CertChainFetchError(err_msg)

    @classmethod
    def extract_cert_chain(cls, bin_cert_chain_data):
        """ Splits a PEM-encoded certificate chain into its individual
        certificates and loads each one as an x509 certificate object.

        Args:
            bin_cert_chain_data (bytes): the certificate chain in PEM format.

        Raises:
            CertExtractionError: raised if the chain cannot be parsed.

        Returns:
            [list] : List of the certificates extracted from the given cert chain.
        """
        try:
            assert type(bin_cert_chain_data) is bytes
            delimiter = '-----END CERTIFICATE-----'
            # Each certificate block is followed by a single '\n' after the delimiter.
            newline_length = 1
            text = bin_cert_chain_data.decode()
            certificates = list()
            cursor = 0
            for match in re.finditer(delimiter, text):
                stop = match.start() + len(delimiter)
                certificates.append(crypto.load_certificate(crypto.FILETYPE_PEM, text[cursor : stop]))
                cursor = stop + newline_length
                if len(text) < cursor:
                    break
            return certificates
        except Exception as err:
            info_log.error(err)
            err_msg = "\tSomething went wrong while extracting the individual certificates from the certificate chain."
            event_log.error(err_msg)
            raise CertExtractionError(err_msg)

    def __init__(self, handle):
        """ Extracts and stores the GPU attestation certificate chain for the
        given device handle (or a TestHandle carrying canned test data).

        Args:
            handle (pynvml.LP_struct_c_nvmlDevice_t): the GPU device handle.
        """
        if isinstance(handle, TestHandle):
            raw_chain = handle.get_test_gpu_certificate_chain()
        else:
            raw_chain = self.get_gpu_certificate_chains(handle)
        self.GpuAttestationCertificateChain = self.extract_cert_chain(raw_chain)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/gpu_cert_chains.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class TestHandle:
    """ Stub GPU handle that serves a canned, hard-coded GPU attestation
    certificate chain during testing.
    """

    def __init__(self, test_cert_chain_data):
        """ Stores the canned certificate chain bytes on the handle.

        Args:
            test_cert_chain_data (bytes): the hardcoded GPU attestation
                                          certificate chain data.
        """
        self.test_cert_chain = test_cert_chain_data

    def get_test_gpu_certificate_chain(self):
        """ Returns the canned GPU attestation certificate chain data.

        Returns:
            [bytes]: the GPU attestation certificate chain data.
        """
        return self.test_cert_chain
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/test_handle.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from OpenSSL import crypto
import re
from pynvml import (
NVML_DEVICE_ARCH_HOPPER,
)
from verifier.utils import (
get_gpu_architecture_value,
convert_string_to_blob,
)
from verifier.config import (
BaseSettings,
HopperSettings,
__author__,
__copyright__,
__version__,
)
from verifier.nvml import GpuCertificateChains
from verifier.nvml.test_handle import TestHandle
from verifier.exceptions import (
CertExtractionError,
UnsupportedGpuArchitectureError,
)
class NvmlHandlerTest:
    """ Drop-in replacement for NvmlHandler used in test mode: serves
    hard-coded GPU properties and attestation data read from local files
    instead of querying the driver via pynvml.
    """

    @classmethod
    def get_number_of_gpus(cls):
        """ Returns the configured number of simulated GPUs for test mode.

        Returns:
            [int]: the number of GPUs to simulate.
        """
        return BaseSettings.TEST_NO_GPU_NUMBER_OF_GPUS

    def extract_cert_chain(self, bin_cert_chain_data):
        """ Splits a PEM certificate chain into individual certificate objects.

        NOTE(review): this duplicates GpuCertificateChains.extract_cert_chain
        and appears unused here (__init__ delegates chain extraction to
        GpuCertificateChains) — candidate for removal; confirm with callers.

        Args:
            bin_cert_chain_data (bytes): the certificate chain in PEM format.

        Raises:
            CertExtractionError: raised if the chain cannot be parsed.

        Returns:
            [list]: the certificates extracted from the given chain.
        """
        try:
            assert type(bin_cert_chain_data) is bytes
            PEM_CERT_END_DELIMITER = '-----END CERTIFICATE-----'
            start_index = 0
            end_index = None
            # length of \n is 1
            length_of_new_line = 1
            str_data = bin_cert_chain_data.decode()
            cert_obj_list = list()
            for itr in re.finditer(PEM_CERT_END_DELIMITER, str_data):
                end_index = itr.start()
                cert_obj_list.append(crypto.load_certificate(crypto.FILETYPE_PEM, \
                    str_data[start_index : end_index + len(PEM_CERT_END_DELIMITER)]))
                start_index = end_index + len(PEM_CERT_END_DELIMITER) + length_of_new_line
                if len(str_data) < start_index:
                    break
            return cert_obj_list
        except Exception as err:
            raise CertExtractionError("\tSomething went wrong while extracting the individual certificates from the certificate chain.\n\tQuitting now.")

    def fetch_attestation_report(self):
        """ Reads a canned attestation report from the path configured for the
        simulated GPU architecture.

        Raises:
            UnsupportedGpuArchitectureError: raised for any architecture other
                                             than Hopper.

        Returns:
            [bytes]: the attestation report data.
        """
        if self.GPUArchitecture == NVML_DEVICE_ARCH_HOPPER:
            path = HopperSettings.ATTESTATION_REPORT_PATH
        else:
            raise UnsupportedGpuArchitectureError("Only HOPPER architecture is supported.")
        # The report is stored as text and converted to binary blob form.
        with open(path, 'r') as f:
            data = convert_string_to_blob(f.read())
        return data

    def get_driver_version(self):
        """ Returns the hard-coded driver version string.
        """
        return self.DriverVersion

    def get_vbios_version(self):
        """ Returns the hard-coded vbios version string.
        """
        return self.VbiosVersion

    def get_test_attestation_cert_chain(self):
        """ Reads the canned GPU attestation certificate chain from the path
        configured for the simulated GPU architecture.

        Raises:
            UnsupportedGpuArchitectureError: raised for any architecture other
                                             than Hopper.

        Returns:
            [bytes]: the certificate chain data.
        """
        if self.GPUArchitecture == NVML_DEVICE_ARCH_HOPPER:
            path = HopperSettings.GPU_ATTESTATION_CERTIFICATES_PATH
        else:
            raise UnsupportedGpuArchitectureError("Only HOPPER architecture is supported.")
        with open(path, 'rb') as f:
            data = f.read()
        return data

    def get_attestation_cert_chain(self):
        """ Returns the list of x509 certificates of the attestation chain.
        """
        return self.CertificateChains.GpuAttestationCertificateChain

    def get_attestation_report(self):
        """ Returns the canned attestation report data.
        """
        return self.AttestationReport

    def get_gpu_architecture(self):
        """ Returns the name of the simulated GPU architecture.
        """
        return get_gpu_architecture_value(self.GPUArchitecture)

    def get_uuid(self):
        """ Returns the hard-coded GPU UUID string.
        """
        return self.UUID

    def __init__(self, settings):
        """ Initializes the simulated GPU with hard-coded properties and loads
        the canned attestation report and certificate chain from disk.

        Args:
            settings (config.HopperSettings): the object containing the various config info.
        """
        self.GPUArchitecture = NVML_DEVICE_ARCH_HOPPER
        self.BoardId = 11111
        self.Index = 0
        self.UUID = 'GPU-11111111-2222-3333-4444-555555555555'
        self.VbiosVersion = "96.00.5e.00.01"
        self.DriverVersion = "545.00"
        self.AttestationReport = self.fetch_attestation_report()
        settings.mark_attestation_report_as_available()
        # Feed the canned chain through the same extraction path as real GPUs
        # by wrapping it in a TestHandle.
        cert_data = self.get_test_attestation_cert_chain()
        handle = TestHandle(cert_data)
        self.CertificateChains = GpuCertificateChains(handle)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/nvml/nvmlHandlerTest.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import queue
from queue import Empty
from threading import (
Thread,
Event,
)
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from verifier.config import (
event_log,
info_log,
BaseSettings,
__author__,
__copyright__,
__version__,
)
from verifier.exceptions import (
UnknownGpuArchitectureError,
TimeoutError,
)
def get_gpu_architecture_value(nvml_arch_value):
    """ Maps the NVML architecture integer value to the corresponding
    architecture name.

    Args:
        nvml_arch_value (int): nvml architecture integer value.

    Raises:
        UnknownGpuArchitectureError: it is raised if the given value does not
        correspond to any known GPU architecture.

    Returns:
        [str]: The corresponding GPU architecture name.
    """
    architecture_names = {
        2: "KEPLER",
        3: "MAXWELL",
        4: "PASCAL",
        5: "VOLTA",
        6: "TURING",
        7: "AMPERE",
        9: "HOPPER",
    }
    name = architecture_names.get(nvml_arch_value)
    if name is None:
        event_log.error("Unknown GPU architecture.")
        raise UnknownGpuArchitectureError("Unknown GPU architecture.")
    return name
def read_field_as_little_endian(binary_data):
    """ Reads a multi-byte field stored in little endian order and returns
    the value as a hexadecimal string.

    Args:
        binary_data (bytes): the data to be read in little endian format.

    Returns:
        [str]: the value of the field as hexadecimal string.
    """
    assert type(binary_data) is bytes
    # Prepending each byte's hex form in turn is the same as reversing the
    # byte order and hex-encoding the result.
    return binary_data[::-1].hex()
def convert_string_to_blob(inp):
    """ Converts an input string of hexadecimal byte values into the
    corresponding bytes object.

    Args:
        inp (str): the input string.

    Returns:
        [bytes]: the corresponding binary data.
    """
    assert type(inp) is str
    cleaned = inp
    # Strip whitespace and the common hex prefixes before decoding.
    for junk in (" ", "\n", "0x", "\\x"):
        cleaned = cleaned.replace(junk, "")
    return bytes.fromhex(cleaned)
def extract_public_key(certificate):
    """ Extracts the public key from the given GPU leaf certificate.

    Args:
        certificate (cryptography.x509.Certificate):
            the gpu leaf certificate as a cryptography x509 object.

    Returns:
        [bytes]: the public key extracted from the certificate in PEM format.
    """
    assert isinstance(certificate, x509.Certificate)
    return certificate.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
def is_zeros(x):
    """ Checks whether every character of the given input string is '0'.

    Args:
        x (str): the input string.

    Returns:
        [bool]: True if all the characters are '0', otherwise False.
    """
    assert type(x) is str
    # Vacuously True for the empty string, matching the loop-based check.
    return all(character == '0' for character in x)
def format_vbios_version(version):
    """ Converts the raw VBIOS version bytes to the xx.xx.xx.xx.xx format.

    Args:
        version (bytes): the VBIOS version

    Returns:
        [str]: the vbios version in the required format.
    """
    assert type(version) is bytes
    # Little-endian read: reverse the bytes and hex-encode them.
    hex_value = version[::-1].hex()
    half = len(hex_value) // 2
    # Rotate the string: second half first, then the pair just before the
    # middle (the leading characters of the first half are dropped).
    rotated = hex_value[half:] + hex_value[half - 2 : half]
    # Join consecutive two-character groups with dots.
    return ".".join(rotated[i : i + 2] for i in range(0, len(rotated), 2))
def function_caller(inp):
    """ Executes a packed function call; intended to run inside the worker
    thread spawned by function_wrapper_with_timeout so that a call exceeding
    the time threshold can be abandoned.

    Args:
        inp (list): [function, *arguments, function_name, queue, event].
    """
    assert type(inp) is list

    # Unpack the trailing bookkeeping entries and the call itself.
    event = inp[-1]
    result_queue = inp[-2]
    function_name = inp[-3]
    target_function = inp[0]
    target_arguments = inp[1:-3]

    outcome = target_function(*target_arguments)

    # If the waiter already gave up, drop the result instead of queueing it.
    if event.is_set():
        event_log.info(f"{function_name} execution timed out, stopping.")
        return

    result_queue.put(outcome)
def function_wrapper_with_timeout(args, max_time_delay):
    """ Runs the given function on a separate thread and waits for its
    result, failing if no result arrives within the threshold time.

    Args:
        args (list): the list containing the function, its arguments, and
            the function name (last element). The queue and event used for
            coordination are appended to this same list.
        max_time_delay: maximum number of seconds to wait for the result.

    Raises:
        TimeoutError: it is raised if the thread spawned takes more time than
        the threshold time limit.

    Returns:
        [any]: the return of the function being executed in the thread.
    """
    assert type(args) is list
    try:
        function_name = args[-1]
        result_queue = queue.Queue()
        args.append(result_queue)
        stop_event = Event()
        args.append(stop_event)

        event_log.info(f"{function_name} called.")
        worker = Thread(target = function_caller, args = ((args),))
        worker.start()
        # Block until the worker posts a result or the timeout elapses.
        return_value = result_queue.get(block=True, timeout= max_time_delay)
        # Tell a late-finishing worker that its result is no longer wanted.
        stop_event.set()
        return return_value
    except Empty:
        event_log.error(f"The {function_name} call timed out.")
        raise TimeoutError(f"The {function_name} call timed out.")
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/utils/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class Error(Exception):
    """ Base class for all exceptions raised by the verifier package.
    """
    pass
class IncorrectProjectError(Error):
    """ It is raised in case a wrong project name is provided as command line
    argument.
    """
    pass
class AttestationReportError(Error):
    """ Base class for all exceptions related to the attestation report.
    """
    pass
class SignatureVerificationError(AttestationReportError):
    """ It is raised when the signature verification of attestation report fails.
    """
    pass
class NoMeasurementsError(AttestationReportError):
    """ It is raised in case there are no or blank measurement block.
    """
    pass
class ParsingError(AttestationReportError):
    """ It is raised in case of any issues during parsing of the attestation
    report data.
    """
    pass
class NoMeasurementBlockError(AttestationReportError):
    """ It is raised when there are zero number of measurement blocks.
    """
    pass
class MeasurementSpecificationError(AttestationReportError):
    """ It is raised if any measurement block does not follow DMTF
    specification.
    """
    pass
class NoCertificateError(AttestationReportError):
    """ It is raised in case there are no certificates in the GPU attestation
    certificate chain.
    """
    pass
class IncorrectNumberOfCertificatesError(AttestationReportError):
    """ It is raised in case there are unexpected number of certificates in the
    GPU attestation certificate chain.
    """
    pass
class CertChainVerificationFailureError(AttestationReportError):
    """ It is raised in case of the GPU attestation certificate chain
    verification failure.
    """
    pass
class AttestationReportVerificationError(AttestationReportError):
    """ It is raised in case of attestation report signature verification
    failure.
    """
    pass
class NonceMismatchError(AttestationReportError):
    """ It is raised in case the nonce in the SPDM GET MEASUREMENT request
    message is not matching with the generated nonce.
    """
    pass
class DriverVersionMismatchError(AttestationReportError):
    """ It is raised in case the driver version in attestation report is not
    matching with the driver version fetched from the driver.
    """
    pass
class VBIOSVersionMismatchError(AttestationReportError):
    """ It is raised in case the vbios version in attestation report is not
    matching with the vbios version fetched from the driver.
    """
    pass
class PynvmlError(Error):
    """ It is the base class for all exceptions related to pynvml.
    """
    pass
class AttestationReportFetchError(PynvmlError):
    """ It is raised in case there is a failure in fetching the Attestation
    report.
    """
    pass
class CertChainFetchError(PynvmlError):
    """ It is raised in case there is a failure in fetching the GPU attestation
    certificate chain.
    """
    pass
class CertExtractionError(PynvmlError):
    """ It is raised in case there is any issue in extracting the individual
    certificates from the certificate chain.
    """
    pass
class UnknownGpuArchitectureError(PynvmlError):
    """ It is raised if the GPU architecture is not correct.
    """
    pass
class UnsupportedGpuArchitectureError(PynvmlError):
    """ It is raised if the GPU architecture is not supported.
    """
    pass
class NoGpuFoundError(PynvmlError):
    """ It is raised in case the number of available GPU is zero.
    """
    pass
# NOTE(review): this name shadows the builtin TimeoutError within this
# module; confirm that importers expect the verifier-specific type.
class TimeoutError(PynvmlError):
    """ It is raised in case the pynvml api call exceeds the threshold limit.
    """
    pass
class RIMError(Error):
    """ It is a base class for exceptions related to the RIM.
    """
    pass
class RIMFetchError(RIMError):
    """ It is raised in case the required RIM file could not be fetched.
    """
    pass
class ElementNotFoundError(RIMError):
    """ It is raised in case the required element is not found in the RIM file.
    """
    pass
class EmptyElementError(RIMError):
    """ It is raised in case the content of an element in the RIM file is empty.
    """
    pass
class RIMSignatureVerificationError(RIMError):
    """ It is raised in case the signature verification of RIM file fails.
    """
    pass
class InvalidCertificateError(RIMError):
    """ It is raised in case there is a problem in extracting the X509
    certificate from the RIM file.
    """
    pass
class RIMCertChainVerificationError(RIMError):
    """ It is raised in case of the RIM certificate chain verification fails.
    """
    pass
class RIMCertChainOCSPVerificationError(RIMError):
    """ It is raised in case the RIM certificate chain OCSP status verification fails.
    """
    pass
class NoRIMMeasurementsError(RIMError):
    """ It is raised in case there are no measurement values in the RIM file.
    """
    pass
# NOTE(review): this name shadows the builtin FileNotFoundError within this
# module; confirm that importers expect the verifier-specific type.
class FileNotFoundError(RIMError):
    """ It is raised in case the required file is not found.
    """
    pass
class RIMVerificationFailureError(RIMError):
    """ It is raised in case the verification of RIM fails.
    """
    pass
class RIMSchemaValidationError(RIMError):
    """ It is raised in case the RIM schema validation fails.
    """
    pass
class InvalidRIMNameError(RIMError):
    """ It is raised in case the name assigned to the RIM class is something
    other than "driver" or "vbios".
    """
    pass
class VerifierError(Error):
    """ It is the base class for the exceptions related to the verifier.
    """
    pass
class MeasurementMismatchError(VerifierError):
    """ It is raised in case any runtime measurement does not match the
    golden value.
    """
    pass
class InvalidMeasurementIndexError(VerifierError):
    """ It is raised in case the same measurement value index is active in both
    driver and vbios RIM file.
    """
    pass
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/exceptions/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from pynvml import (
NVML_ERROR_UNINITIALIZED,
NVML_ERROR_TIMEOUT,
NVML_ERROR_RESET_REQUIRED,
NVML_ERROR_IN_USE,
NVML_ERROR_MEMORY,
NVML_ERROR_NO_DATA,
NVML_ERROR_INSUFFICIENT_RESOURCES,
NVMLError,
)
from verifier.exceptions import (
SignatureVerificationError,
NonceMismatchError,
DriverVersionMismatchError,
AttestationReportFetchError,
CertChainFetchError,
RIMSignatureVerificationError,
RIMVerificationFailureError,
MeasurementMismatchError,
RIMSchemaValidationError,
InvalidMeasurementIndexError,
VBIOSVersionMismatchError,
)
def is_non_fatal_issue(error):
    """ The function to check if the given error is non fatal or not.

    Args:
        error (Exception): any exception that may be raised.

    Returns:
        [bool]: returns True if the error is non fatal. Otherwise returns
        False.
    """
    # pynvml creates a distinct NVMLError subclass per error code, so the
    # concrete class is recovered via type(NVMLError(code)).
    non_fatal_nvml_types = tuple(
        type(NVMLError(code))
        for code in (
            NVML_ERROR_UNINITIALIZED,
            NVML_ERROR_TIMEOUT,
            NVML_ERROR_RESET_REQUIRED,
            NVML_ERROR_IN_USE,
            NVML_ERROR_MEMORY,
            NVML_ERROR_NO_DATA,
            NVML_ERROR_INSUFFICIENT_RESOURCES,
        )
    )
    non_fatal_types = non_fatal_nvml_types + (NonceMismatchError, MeasurementMismatchError)
    return isinstance(error, non_fatal_types)
def need_to_change_gpu_state(error):
    """ The function to check if there is a need to set the GPU Ready state to
    not ready.

    Args:
        error (Exception): any exception that may be raised.

    Returns:
        [bool]: returns True if there is a need to change the GPU ready state,
        otherwise returns False.
    """
    # pynvml creates a distinct NVMLError subclass per error code, so the
    # concrete class is recovered via type(NVMLError(code)).
    nvml_error_types = tuple(
        type(NVMLError(code))
        for code in (
            NVML_ERROR_UNINITIALIZED,
            NVML_ERROR_TIMEOUT,
            NVML_ERROR_RESET_REQUIRED,
            NVML_ERROR_IN_USE,
            NVML_ERROR_MEMORY,
            NVML_ERROR_NO_DATA,
            NVML_ERROR_INSUFFICIENT_RESOURCES,
        )
    )
    verifier_error_types = (
        AttestationReportFetchError,
        SignatureVerificationError,
        DriverVersionMismatchError,
        VBIOSVersionMismatchError,
        CertChainFetchError,
        RIMSchemaValidationError,
        RIMVerificationFailureError,
        RIMSignatureVerificationError,
        InvalidMeasurementIndexError,
    )
    return isinstance(error, nvml_error_types + verifier_error_types)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/exceptions/utils.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from verifier.config import info_log
from verifier.exceptions import ParsingError
class SpdmMeasurementRequestMessage:
    """ Represents the SPDM GET_MEASUREMENT request message.

    Wire layout of the MEASUREMENTS request message per DMTF's SPDM 1.1 spec:

        OFFSET - FIELD               - SIZE(in bytes)
        0      - SPDMVersion         - 1
        1      - RequestResponseCode - 1
        2      - Param1              - 1
        3      - Param2              - 1
        4      - Nonce               - 32
        36     - SlotIDParam         - 1
    """
    # Field order mirrors the wire layout above; parse() relies on it.
    FieldSize = {
        "SPDMVersion" : 1,
        "RequestResponseCode" : 1,
        "Param1" : 1,
        "Param2" : 1,
        "Nonce" : 32,
        "SlotIDParam" : 1,
    }

    def get_spdm_version(self):
        """ Returns the SPDMVersion field of the request message.

        Returns:
            [bytes]: the spdm version.
        """
        return self.SPDMVersion

    def set_spdm_version(self, value):
        """ Assigns the SPDMVersion field of the request message.

        Args:
            value (bytes): the spdm version.
        """
        self.SPDMVersion = value

    def get_request_response_code(self):
        """ Returns the RequestResponseCode field of the request message.

        Returns:
            [bytes]: the RequestResponseCode.
        """
        return self.RequestResponseCode

    def set_request_response_code(self, value):
        """ Assigns the RequestResponseCode field of the request message.

        Args:
            value (bytes): the RequestResponse value.
        """
        self.RequestResponseCode = value

    def get_param1(self):
        """ Returns the Param1 field of the request message.

        Returns:
            [bytes]: the Param1 value.
        """
        return self.Param1

    def set_param1(self, value):
        """ Assigns the Param1 field of the request message.

        Args:
            value (bytes): the Param1 value.
        """
        self.Param1 = value

    def get_param2(self):
        """ Returns the Param2 field of the request message.

        Returns:
            [bytes]: the Param2 value.
        """
        return self.Param2

    def set_param2(self, value):
        """ Assigns the Param2 field of the request message.

        Args:
            value (bytes): the Param2 value.
        """
        self.Param2 = value

    def get_nonce(self):
        """ Returns the Nonce field of the request message.

        Returns:
            [bytes]: the nonce value.
        """
        return self.Nonce

    def set_nonce(self, value):
        """ Assigns the Nonce field of the request message.

        Args:
            value (bytes): the nonce value.
        """
        self.Nonce = value

    def get_slot_id_param(self):
        """ Returns the SlotIDParam field of the request message.

        Returns:
            [bytes]: SlotIDParam value.
        """
        return self.SlotIDParam

    def set_slot_id_param(self, value):
        """ Assigns the SlotIDParam field of the request message.

        Args:
            value (bytes): the SlotIDParam value.
        """
        self.SlotIDParam = value

    def parse(self, request_data):
        """ Parses the raw SPDM GET_MEASUREMENT request message into the
        individual fields, driven by the FieldSize table.

        Args:
            request_data (bytes): the raw message data.

        Raises:
            ParsingError: it is raised if the message length does not match
            the sum of the expected field sizes.
        """
        offset = 0
        for field_name, field_size in self.FieldSize.items():
            setattr(self, field_name, request_data[offset : offset + field_size])
            offset += field_size

        # Every byte of the message must be consumed by the fields above.
        if offset != len(request_data):
            err_msg = "Something went wrong during parsing the SPDM GET MEASUREMENT request message."
            info_log.error(err_msg)
            raise ParsingError(err_msg)

    def print_obj(self, logger):
        """ Logs all the field values of the SPDM GET_MEASUREMENT request.

        Args:
            logger (logging.Logger): the logger object.
        """
        logger.debug("GET MEASUREMENT REQUEST MESSAGE")
        for field_name in self.FieldSize:
            logger.debug(f"{field_name} : {getattr(self, field_name).hex()}")

    def __init__(self, request_data):
        """ Constructor for the SpdmMeasurementRequestMessage class
        representing the SPDM GET_MEASUREMENT request message.

        Args:
            request_data (bytes): raw SPDM GET_MEASUREMENT request message.
        """
        assert type(request_data) is bytes
        # Initialize every field to None before parsing fills them in.
        for field_name in self.FieldSize:
            setattr(self, field_name, None)
        self.parse(request_data)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/spdm_msrt_req_msg.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ecdsa import (
VerifyingKey,
BadSignatureError,
)
from verifier.utils import extract_public_key
from verifier.config import (
info_log,
event_log,
__author__,
__copyright__,
__version__,
)
from .spdm_msrt_resp_msg import SpdmMeasurementResponseMessage
from .spdm_msrt_req_msg import SpdmMeasurementRequestMessage
from verifier.exceptions import NoMeasurementsError
class AttestationReport:
""" A class to represent the attestation report coming from the GPU driver.
The class to encapsulate the Attestation report which comprises of the
SPDM GET MEASUREMENT request message and the SPDM GET MEASUREMENT response
message.
"""
LENGTH_OF_SPDM_GET_MEASUREMENT_REQUEST_MESSAGE = 37
def extract_response_message(self, attestation_report_data):
""" Extracts the SPDM GET_MEASUREMENT response message from the attestation report.
Args:
attestation_report_data (bytes): the attestation report coming from gpu via the nvml api.
Returns:
[bytes]: returns the extracted SPDM GET_MEASUREMENT response message.
"""
assert type(attestation_report_data) is bytes
assert len(attestation_report_data) > self.LENGTH_OF_SPDM_GET_MEASUREMENT_REQUEST_MESSAGE
response = attestation_report_data[self.LENGTH_OF_SPDM_GET_MEASUREMENT_REQUEST_MESSAGE : ]
return response
def extract_request_message(self, attestation_report_data):
""" Extracts the SPDM GET_MEASUREMENT request message from the attestation report.
Args:
attestation_report_data (bytes): the attestation report coming from gpu via the nvml api.
Returns:
[bytes]: returns the extracted SPDM GET_MEASUREMENT request message.
"""
assert type(attestation_report_data) is bytes
assert len(attestation_report_data) > self.LENGTH_OF_SPDM_GET_MEASUREMENT_REQUEST_MESSAGE
request = attestation_report_data[ : self.LENGTH_OF_SPDM_GET_MEASUREMENT_REQUEST_MESSAGE]
return request
@staticmethod
def concatenate(request_data, response_data, signature_length):
""" Computes the binary data over which the signature verification is to be done.
Args:
request_data (bytes) : the SPDM GET_MEASUREMENTS request message.
response_data (bytes) : the successful SPDM GET_MEASUREMENT response message.
signature_length (int): the size of the digital signature in number of bytes.
Returns:
[bytes]: returns the binary data whose signature verification is to be done.
"""
assert type(request_data) is bytes
assert type(response_data) is bytes
assert type(signature_length) is int
data = request_data + response_data
data = data[ : len(data) - signature_length]
return data
def verify_signature(self, certificate, signature_length, hashfunc):
""" Performs the signature verification of the attestation report.
Args:
certificate (OpenSSL.crypto.X509): The GPU attestation leaf certificate.
signature_length (int): the length of the signature field of the attestation report.
hashfunc (_hashlib.HASH): The hashlib hash function.
Returns:
[bool]: return True if the signature verification is successful
otherwise, return False.
"""
try:
event_log.debug("Extracting the public key from the certificate for the attestation report.")
public_key = extract_public_key(certificate)
verifying_key = VerifyingKey.from_pem(public_key)
event_log.debug("Extracted the public key from the certificate for the the attestation report.")
data_whose_signature_is_to_be_verified = AttestationReport.concatenate(request_data = self.request_data,
response_data = self.response_data,
signature_length = signature_length)
signature = self.get_response_message().get_signature()
event_log.debug("Verifying the signature of the attestation report.")
status = verifying_key.verify(signature, data_whose_signature_is_to_be_verified, hashfunc = hashfunc)
return status
except BadSignatureError:
return False
except Exception as error:
err_msg = "Something went wrong during attestation report signature verification."
info_log.info(err_msg)
return False
def get_measurements(self):
""" Fetches the runtime measurements from the attestation report.
Raises:
NoMeasurementsError: It is raised in case there are no or blank measurement block.
Returns:
[list]: list of measurement values.
"""
measurement_list = self.response_message.get_measurement_record().get_measurements()
event_log.debug("Runtime measurements are : \n\t\t\t\t\t\t\t{}".format('\n\t\t\t\t\t\t\t'.join(map(str, measurement_list))))
if len(measurement_list) == 0:
err_msg = "\tNo GPU runtime measurements found."
info_log.error(err_msg)
raise NoMeasurementsError(err_msg + "\n\tQuitting now.")
return measurement_list
def get_request_message(self):
""" Fetches the SPDM GET MEASUREMENT request message represented as an object of class SpdmMeasurementRequestMessage.
Returns:
[SpdmMeasurementRequestMessage]: the object representing the SPDM GET MEASUREMENT request message.
"""
return self.request_message
def get_response_message(self):
""" Fetches the SPDM GET MEASUREMENT response message represented as an object of class SpdmMeasurementResponseMessage.
Returns:
[SpdmMeasurementResponseMessage]: the object representing the SPDM GET MEASUREMENT response message.
"""
return self.response_message
def print_obj(self, logger):
""" Prints all the fields of the request and response message in the attestation report object.
Args:
logger (logging.Logger): the logger object which prints the output according to its set level.
"""
self.request_message.print_obj(logger)
self.response_message.print_obj(logger)
    def __init__(self, data, settings):
        """ The constructor for the attestation report class.

        Splits the raw report into its SPDM request and response portions and
        parses each into a message object.

        Args:
            data (bytes): the raw attestation report data coming from the nvml api.
            settings (config.HopperSettings): the setting object that have the various config info.
        """
        assert type(data) is bytes
        # The extract_* helpers (presumably defined earlier in this class) slice
        # the GET_MEASUREMENT request and response portions out of the raw blob.
        self.request_data = self.extract_request_message(data)
        self.response_data = self.extract_response_message(data)
        # Parsed message objects; response parsing also updates `settings` status flags.
        self.request_message = SpdmMeasurementRequestMessage(self.request_data)
        self.response_message = SpdmMeasurementResponseMessage(self.response_data, settings)
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/__init__.py
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from verifier.utils import read_field_as_little_endian
from verifier.exceptions import (
NoMeasurementBlockError,
MeasurementSpecificationError,
ParsingError,
)
class DmtfMeasurement:
    """ Represents a single measurement encoded in the DMTF format.

    This is the layout of the Measurement field when bit 0 of the
    MeasurementSpecification field is set (DMTF) in DMTF's SPDM 1.1 spec:

    OFFSET - FIELD - SIZE(in bytes)
    0      - DMTFSpecMeasurementValueType - 1
    1      - DMTFSpecMeasurementValueSize - 2
    3      - DMTFSpecMeasurementValue     - DMTFSpecMeasurementValueSize
    """
    # Byte widths of the fixed-size fields; the value field's width is dynamic.
    FieldSize = {
        "DMTFSpecMeasurementValueType" : 1,
        "DMTFSpecMeasurementValueSize" : 2,
        "DMTFSpecMeasurementValue" : None,
    }

    def get_measurement_value(self):
        """ Return the raw measurement value as bytes. """
        return self.DMTFSpecMeasurementValue

    def get_measurement_value_type(self):
        """ Return the measurement value type as an integer. """
        return self.DMTFSpecMeasurementValueType

    def get_measurement_value_size(self):
        """ Return the size of the measurement value in bytes. """
        return self.DMTFSpecMeasurementValueSize

    def set_measurement_value(self, value):
        """ Store *value* (bytes) as the measurement value. """
        self.DMTFSpecMeasurementValue = value

    def set_measurement_value_type(self, value):
        """ Store *value* (int) as the measurement value type. """
        self.DMTFSpecMeasurementValueType = value

    def set_measurement_value_size(self, value):
        """ Store *value* (int) as the measurement value size in bytes. """
        self.DMTFSpecMeasurementValueSize = value

    def parse(self, measurement_data):
        """ Decode the raw DMTF measurement bytes into this object's fields.

        Args:
            measurement_data (bytes): the raw DMTF Measurement data.
        """
        offset = 0

        # 1-byte value type, read big-endian.
        width = self.FieldSize['DMTFSpecMeasurementValueType']
        self.set_measurement_value_type(int(measurement_data[offset : offset + width].hex(), 16))
        offset += width

        # 2-byte value size, stored little-endian on the wire.
        width = self.FieldSize['DMTFSpecMeasurementValueSize']
        raw_size = measurement_data[offset : offset + width]
        self.set_measurement_value_size(int(read_field_as_little_endian(raw_size), 16))
        offset += width

        # The remaining DMTFSpecMeasurementValueSize bytes are the value itself.
        value_size = self.get_measurement_value_size()
        self.set_measurement_value(measurement_data[offset : offset + value_size])

    def print_obj(self, logger):
        """ Log every field of this DMTF measurement.

        Args:
            logger (logging.Logger): the logger object.
        """
        logger.debug(f"DMTFSpecMeasurementValueType : {self.DMTFSpecMeasurementValueType}")
        logger.debug(f"DMTFSpecMeasurementValueSize : {self.DMTFSpecMeasurementValueSize}")
        logger.debug(f"DMTFSpecMeasurementValue : {self.DMTFSpecMeasurementValue.hex()}")

    def __init__(self, measurement_data):
        """ Build a DmtfMeasurement by parsing the raw measurement bytes.

        Args:
            measurement_data (bytes): the raw DMTF Measurement data.
        """
        assert type(measurement_data) is bytes
        self.DMTFSpecMeasurementValueType = None
        self.DMTFSpecMeasurementValueSize = None
        self.DMTFSpecMeasurementValue = None
        self.parse(measurement_data)
class MeasurementRecord:
    """ Class to represent the Measurement Record (list of measurement blocks).

    The structure of each Measurement block in DMTF's SPDM 1.1 spec is as follows:
    OFFSET - FIELD                    - SIZE(in bytes)
    0      - Index                    - 1
    1      - MeasurementSpecification - 1
    2      - MeasurementSize          - 2
    4      - Measurement              - MeasurementSize
    """
    FieldSize = {
        "Index" : 1,
        "MeasurementSpecification" : 1,
        "MeasurementSize" : 2,
    }
    # Bit 0 set in MeasurementSpecification means the block follows the DMTF format.
    DMTF_MEASUREMENT_SPECIFICATION_VALUE = 1

    def get_measurements(self):
        """ Fetches all the measurement values and returns them as a list.

        Returns:
            [list]: list of measurement values (hex strings), ordered by block index.
        """
        measurement_list = [None] * len(self.MeasurementBlocks)
        for index in self.MeasurementBlocks:
            # Block indices are 1-based in the SPDM message.
            measurement_list[index - 1] = self.MeasurementBlocks[index].get_measurement_value().hex()
        return measurement_list

    def parse(self, binary_data, settings):
        """ Parses the raw measurement record data and sets the fields of the class MeasurementRecord object
        representing the Measurement Record.

        Args:
            binary_data (bytes): the raw Measurement Record data
            settings (config.HopperSettings): the object containing the various config info.

        Raises:
            NoMeasurementBlockError: it is raised when there are zero number of measurement blocks.
            MeasurementSpecificationError: it is raised if any measurement block does not follow DMTF specification.
            ParsingError: it is raised if there is any issue in the parsing of the data.
        """
        assert type(binary_data) is bytes

        if self.NumberOfBlocks == 0:
            # NOTE: fixed the "respone" typo in this error message.
            err_msg = "\tThere are no measurement blocks in the response message."
            raise NoMeasurementBlockError(err_msg)

        byte_index = 0
        for _ in range(self.NumberOfBlocks):
            x = binary_data[byte_index : byte_index + self.FieldSize['Index']]
            index = int(x.hex(), 16)
            byte_index = byte_index + self.FieldSize['Index']

            x = binary_data[byte_index : byte_index + self.FieldSize['MeasurementSpecification']]
            measurement_specification = int(x.hex(), 16)
            if measurement_specification != self.DMTF_MEASUREMENT_SPECIFICATION_VALUE:
                # BUG FIX: the original error path called self.get_index(), a method
                # that does not exist on MeasurementRecord (so it raised AttributeError
                # instead of the intended exception), and passed several positional
                # arguments to the exception. Build a single message from the block
                # index that was just parsed instead.
                raise MeasurementSpecificationError(
                    f"Measurement block at index {index} not following DMTF specification.\n\tQuitting now.")
            byte_index = byte_index + self.FieldSize['MeasurementSpecification']

            x = binary_data[byte_index : byte_index + self.FieldSize['MeasurementSize']]
            # MeasurementSize is stored little-endian on the wire.
            measurement_size = int(read_field_as_little_endian(x), 16)
            byte_index = byte_index + self.FieldSize['MeasurementSize']

            measurement_data = binary_data[byte_index : byte_index + measurement_size]
            self.MeasurementBlocks[index] = DmtfMeasurement(measurement_data)
            byte_index = byte_index + measurement_size

        # The blocks must exactly cover the record; leftover or missing bytes
        # indicate a malformed message.
        if byte_index != len(binary_data):
            err_msg = "Something went wrong while parsing the MeasurementRecord.\nQuitting now."
            raise ParsingError(err_msg)

        # Mark measurements as available only when every block (indices assumed
        # 1..NumberOfBlocks) parsed with a complete value.
        count = 0
        for i in range(1, self.NumberOfBlocks + 1):
            if self.MeasurementBlocks[i] is not None \
               and len(self.MeasurementBlocks[i].get_measurement_value()) == self.MeasurementBlocks[i].get_measurement_value_size():
                count = count + 1

        if count == self.NumberOfBlocks:
            settings.mark_attestation_report_measurements_as_available()

    def print_obj(self, logger):
        """ Prints all the field values of the class representing the Measurement Records.

        Args:
            logger (logging.Logger): the logger object.
        """
        for i in range(1, self.NumberOfBlocks + 1):
            logger.debug("----------------------------------------")
            logger.debug(f"Measurement Block index : {i}")
            self.MeasurementBlocks[i].print_obj(logger)

    def __init__(self, measurement_record_data, number_of_blocks, settings):
        """ The constructor method for the class MeasurementRecord to represent the measurement records.

        Args:
            measurement_record_data (bytes): the raw measurement record data
            number_of_blocks (int): the number of measurement blocks
            settings (config.HopperSettings): object that contains the config info.
        """
        assert type(measurement_record_data) is bytes
        assert type(number_of_blocks) is int
        # Maps 1-based block index -> DmtfMeasurement.
        self.MeasurementBlocks = dict()
        self.NumberOfBlocks = number_of_blocks
        self.parse(measurement_record_data, settings)
class OpaqueData:
    """ Represents the OpaqueData field of the SPDM GET_MEASUREMENT response message.

    The field is a sequence of TLV-style entries:
    [DataType(2 bytes)|DataSize(2 bytes)|Data(DataSize bytes)][DataType(2 bytes)|DataSize(2 bytes)|Data(DataSize bytes)]...
    """
    # Known DataType values mapped to their symbolic field names.
    OPAQUE_DATA_TYPES = {
        1 : 'OPAQUE_FIELD_ID_CERT_ISSUER_NAME',
        2 : 'OPAQUE_FIELD_ID_CERT_AUTHORITY_KEY_IDENTIFIER',
        3 : 'OPAQUE_FIELD_ID_DRIVER_VERSION',
        4 : 'OPAQUE_FIELD_ID_GPU_INFO',
        5 : 'OPAQUE_FIELD_ID_SKU',
        6 : 'OPAQUE_FIELD_ID_VBIOS_VERSION',
        7 : 'OPAQUE_FIELD_ID_MANUFACTURER_ID',
        8 : 'OPAQUE_FIELD_ID_TAMPER_DETECTION',
        9 : 'OPAQUE_FIELD_ID_SMC',
        10 : 'OPAQUE_FIELD_ID_VPR',
        11 : 'OPAQUE_FIELD_ID_NVDEC0_STATUS',
        12 : 'OPAQUE_FIELD_ID_MSRSCNT',
        13 : 'OPAQUE_FIELD_ID_CPRINFO',
        14 : 'OPAQUE_FIELD_ID_BOARD_ID',
        15 : 'OPAQUE_FIELD_ID_CHIP_SKU',
        16 : 'OPAQUE_FIELD_ID_CHIP_SKU_MOD',
        17 : 'OPAQUE_FIELD_ID_PROJECT',
        18 : 'OPAQUE_FIELD_ID_PROJECT_SKU',
        19 : 'OPAQUE_FIELD_ID_PROJECT_SKU_MOD',
        255 : 'OPAQUE_FIELD_ID_INVALID',
    }
    # Each measurement-count entry is a 4-byte little-endian integer.
    MSR_COUNT_SIZE = 4
    FieldSize = {
        "DataType" : 2,
        "DataSize" : 2,
    }

    def get_data(self, field_name):
        """ Return the content stored for the given opaque field name.

        Args:
            field_name (str): the name/data type of the field in the opaque data.

        Returns:
            [bytes] : the content of the given field name.
        """
        assert type(field_name) is str
        return self.OpaqueDataField[field_name]

    def parse_measurement_count(self, data):
        """ Decode the measurement-count entry into a list of integers.

        Args:
            data (bytes): the raw measurement count data.

        Raises:
            ParsingError: it is raised if the length of the data is not a multiple of MSR_COUNT_SIZE.
        """
        if len(data) % self.MSR_COUNT_SIZE != 0:
            raise ParsingError("Invalid size of measurement count field data.")

        counts = [
            int(read_field_as_little_endian(data[offset : offset + self.MSR_COUNT_SIZE]), 16)
            for offset in range(0, len(data), self.MSR_COUNT_SIZE)
        ]
        self.OpaqueDataField['OPAQUE_FIELD_ID_MSRSCNT'] = counts

    def parse(self, binary_data):
        """ Walk the TLV entries of the OpaqueData field and record each one.

        Args:
            binary_data (bytes): the data content of the Opaque Data field.
        """
        offset = 0
        total = len(binary_data)
        while offset < total:
            raw = binary_data[offset : offset + self.FieldSize['DataType']]
            field_name = self.OPAQUE_DATA_TYPES[int(read_field_as_little_endian(raw), 16)]
            offset += self.FieldSize['DataType']

            raw = binary_data[offset : offset + self.FieldSize['DataSize']]
            field_size = int(read_field_as_little_endian(raw), 16)
            offset += self.FieldSize['DataSize']

            field_content = binary_data[offset : offset + field_size]
            if field_name == 'OPAQUE_FIELD_ID_MSRSCNT':
                # Measurement counts get decoded into a list of ints rather
                # than stored as raw bytes.
                self.parse_measurement_count(field_content)
            else:
                self.OpaqueDataField[field_name] = field_content
            offset += field_size

    def print_obj(self, logger):
        """ Log every stored opaque field and its content.

        Args:
            logger (logging.Logger): the logger object.
        """
        for name, content in self.OpaqueDataField.items():
            logger.debug(f"{name} : {content}")

    def __init__(self, binary_data):
        """ Build an OpaqueData object by parsing the raw field content.

        Args:
            binary_data (bytes): the Opaque data content.
        """
        assert type(binary_data) is bytes
        self.OpaqueDataField = dict()
        self.parse(binary_data)
class SpdmMeasurementResponseMessage:
    """ Class to represent the SPDM GET_MEASUREMENT response message.

    Following is the expected structure of the Successful MEASUREMENTS response message in DMTF's SPDM 1.1 spec.
    OFFSET   - FIELD                   - SIZE(in bytes)
    0        - SPDMVersion             - 1
    1        - RequestResponseCode     - 1
    2        - Param1                  - 1
    3        - Param2                  - 1
    4        - NumberOfBlocks          - 1
    5        - MeasurementRecordLength - 3
    8        - MeasurementRecord       - L1 = MeasurementRecordLength
    8+L1     - Nonce                   - 32
    40+L1    - OpaqueLength            - 2
    42+L1    - OpaqueData              - L2 = OpaqueLength
    42+L1+L2 - Signature               - 64
    """
    # Byte widths of the fixed-size fields. The 'Signature' width depends on the
    # settings object and is added per instance in __init__.
    FieldSize = {
        "SPDMVersion" : 1,
        "RequestResponseCode" : 1,
        "Param1" : 1,
        "Param2" : 1,
        "NumberOfBlocks" : 1,
        "MeasurementRecordLength" : 3,
        "Nonce" : 32,
        "OpaqueLength" : 2,
    }

    def get_spdm_version(self):
        """ Return the SPDMVersion field (bytes). """
        return self.SPDMVersion

    def set_spdm_version(self, value):
        """ Set the SPDMVersion field (bytes). """
        self.SPDMVersion = value

    def get_request_response_code(self):
        """ Return the RequestResponseCode field (bytes). """
        return self.RequestResponseCode

    def set_request_response_code(self, value):
        """ Set the RequestResponseCode field (bytes). """
        self.RequestResponseCode = value

    def get_param1(self):
        """ Return the Param1 field (bytes). """
        return self.Param1

    def set_param1(self, value):
        """ Set the Param1 field (bytes). """
        self.Param1 = value

    def get_param2(self):
        """ Return the Param2 field (bytes). """
        return self.Param2

    def set_param2(self, value):
        """ Set the Param2 field (bytes). """
        self.Param2 = value

    def get_number_of_blocks(self):
        """ Return the number of measurement blocks (int). """
        return self.NumberOfBlocks

    def set_number_of_blocks(self, value):
        """ Set the number of measurement blocks (int). """
        self.NumberOfBlocks = value

    def get_measurement_record_length(self):
        """ Return the measurement record length in bytes (int). """
        return self.MeasurementRecordLength

    def set_measurement_record_length(self, value):
        """ Set the measurement record length in bytes (int). """
        self.MeasurementRecordLength = value

    def get_measurement_record(self):
        """ Return the MeasurementRecord object for this response. """
        return self.MeasurementRecord

    def set_measurement_record(self, value):
        """ Assign the MeasurementRecord object for this response. """
        self.MeasurementRecord = value

    def get_nonce(self):
        """ Return the Nonce field (bytes). """
        return self.Nonce

    def set_nonce(self, value):
        """ Set the Nonce field (bytes). """
        self.Nonce = value

    def get_opaque_data_length(self):
        """ Return the length of the OpaqueData field in bytes (int). """
        return self.OpaqueLength

    def set_opaque_data_length(self, value):
        """ Set the length of the OpaqueData field in bytes (int). """
        self.OpaqueLength = value

    def get_opaque_data(self):
        """ Return the OpaqueData object for this response. """
        return self.OpaqueData

    def get_signature(self):
        """ Return the Signature field (bytes). """
        return self.Signature

    def set_signature(self, value):
        """ Set the Signature field (bytes). """
        self.Signature = value

    def parse(self, response, settings):
        """ Parses the raw SPDM GET_MEASUREMENT response message and sets the various fields of the SpdmMeasurementResponseMessage class object.

        Args:
            response (bytes): the raw data content of the SPDM GET_MEASUREMENT response message.
            settings (config.HopperSettings): object that contains the config info.
        """
        assert type(response) is bytes
        byte_index = 0

        value = response[byte_index : byte_index + self.FieldSize['SPDMVersion']]
        self.set_spdm_version(value)
        byte_index = byte_index + self.FieldSize['SPDMVersion']

        value = response[byte_index : byte_index + self.FieldSize['RequestResponseCode']]
        self.set_request_response_code(value)
        byte_index = byte_index + self.FieldSize['RequestResponseCode']

        value = response[byte_index : byte_index + self.FieldSize['Param1']]
        self.set_param1(value)
        byte_index = byte_index + self.FieldSize['Param1']

        value = response[byte_index : byte_index + self.FieldSize['Param2']]
        self.set_param2(value)
        byte_index = byte_index + self.FieldSize['Param2']

        x = response[byte_index : byte_index + self.FieldSize['NumberOfBlocks']]
        value = int(x.hex(), 16)
        self.set_number_of_blocks(value)
        byte_index = byte_index + self.FieldSize['NumberOfBlocks']

        # Multi-byte length fields are little-endian on the wire.
        x = response[byte_index : byte_index + self.FieldSize['MeasurementRecordLength']]
        value = int(read_field_as_little_endian(x), 16)
        self.set_measurement_record_length(value)
        byte_index = byte_index + self.FieldSize['MeasurementRecordLength']

        measurement_record = response[byte_index : byte_index + self.get_measurement_record_length()]
        self.set_measurement_record(MeasurementRecord(measurement_record, self.get_number_of_blocks(), settings))
        byte_index = byte_index + self.get_measurement_record_length()

        value = response[byte_index : byte_index + self.FieldSize['Nonce']]
        self.set_nonce(value)
        byte_index = byte_index + self.FieldSize['Nonce']

        x = response[byte_index : byte_index + self.FieldSize['OpaqueLength']]
        x = read_field_as_little_endian(x)
        value = int(x, 16)
        self.set_opaque_data_length(value)
        byte_index = byte_index + self.FieldSize['OpaqueLength']

        opaque_data_content = response[byte_index : byte_index + self.get_opaque_data_length()]
        self.OpaqueData = OpaqueData(opaque_data_content)
        byte_index = byte_index + self.get_opaque_data_length()

        # 'Signature' width comes from settings (added to the per-instance
        # FieldSize copy in __init__).
        value = response[byte_index : byte_index + self.FieldSize['Signature']]
        self.set_signature(value)
        byte_index = byte_index + self.FieldSize['Signature']

    def print_obj(self, logger):
        """ Prints all the fields of the class SpdmMeasurementResponseMessage representing the SPDM GET_MEASUREMENT response message.

        Args:
            logger (logging.Logger): the logger object.
        """
        logger.debug("GET MEASUREMENT RESPONSE MESSAGE")
        logger.debug(f"SPDMVersion : {self.SPDMVersion.hex()}")
        logger.debug(f"RequestResponseCode : {self.RequestResponseCode.hex()}")
        logger.debug(f"Param1 : {self.Param1.hex()}")
        logger.debug(f"Param2 : {self.Param2.hex()}")
        logger.debug(f"NumberOfBlocks : {self.NumberOfBlocks}")
        logger.debug(f"MeasurementRecordLength : {self.MeasurementRecordLength}")
        logger.debug(f"MeasurementRecord :")
        self.MeasurementRecord.print_obj(logger)
        logger.debug(f"Nonce : {self.Nonce.hex()}")
        logger.debug(f"OpaqueLength : {self.OpaqueLength}")
        logger.debug(f"OpaqueData :")
        self.OpaqueData.print_obj(logger)
        logger.debug(f"Signature : {self.Signature.hex()}")

    def __init__(self, response, settings):
        """ The constructor method for the class SpdmMeasurementResponseMessage representing the SPDM GET_MEASUREMENT response message.

        Args:
            response (bytes): The raw SPDM GET_MEASUREMENT response message.
            settings (config.HopperSettings): the object containing various config.

        Raises:
            ParsingError: raised when the response message cannot be parsed.
        """
        assert type(response) is bytes
        self.SPDMVersion = None
        self.RequestResponseCode = None
        self.Param1 = None
        self.Param2 = None
        self.NumberOfBlocks = None
        self.MeasurementRecordLength = None
        self.MeasurementRecord = None
        self.Nonce = None
        self.OpaqueLength = None
        self.OpaqueData = None
        self.Signature = None
        # BUG FIX: the original wrote settings.signature_length straight into the
        # class-level FieldSize dict, so every instance mutated shared class state
        # and a later instance created with different settings silently changed
        # the signature width for all existing instances. Shadow the class dict
        # with a per-instance copy instead.
        self.FieldSize = dict(self.FieldSize)
        self.FieldSize['Signature'] = settings.signature_length

        try:
            self.parse(response, settings)
        except Exception as error:
            # Chain the original error so the root cause is not lost.
            raise ParsingError("Could not parse the GET MEASUREMENT response message.") from error
|
nvtrust-main
|
guest_tools/gpu_verifiers/local_gpu_verifier/src/verifier/attestation/spdm_msrt_resp_msg.py
|
nvtrust-main
|
guest_tools/attestation_sdk/__init__.py
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import nv_attestation_sdk
from nv_attestation_sdk import attestation

## testing
# Exercise the Attestation singleton: naming, verifier registration,
# attestation, token retrieval, token validation and nonce handling.

# NOTE: fixed the original "inital-name" typo so the constructor argument
# matches the expectation printed on the next line.
client = attestation.Attestation("initial-name")
print("Expecting initial-name")
print("node name :", client.get_name())

client.set_name("thisNode1")
# NOTE: fixed the expectation text ("ThisNode1") to match the name actually set.
print("Expecting thisNode1")
print("node name :", client.get_name())

# will use the same singleton
client2 = attestation.Attestation()
print("Expecting thisNode1")
print("node name :", client2.get_name())

my_evidence_policy = """version=1.0;
authorizationrules
{
    c:[type="secureBootEnabled", issuer=="AttestationService"]=> permit()
};
issuancerules
{
    c:[type="secureBootEnabled", issuer=="AttestationService"]=> issue(claim=c)
    c:[type="notSafeMode", issuer=="AttestationService"]=> issue(claim=c)
};"""

# NOTE(review): currently unused in this script; kept for reference.
my_results_policy = """version=1.0;
authorizationrules
{
    c:[type="secureBootEnabled", issuer=="AttestationService"]=> permit()
};
issuancerules
{
    c:[type="secureBootEnabled", issuer=="AttestationService"]=> issue(claim=c)
    c:[type="notSafeMode", issuer=="AttestationService"]=> issue(claim=c)
};"""

print("Show verifiers - should be empty")
print(client.get_verifiers())

print("Add TEST CPU verifier")
client.add_verifier(attestation.Devices.CPU, attestation.Environment.TEST, "https://foo.com", my_evidence_policy)
print(client.get_verifiers())

print("Add TEST GPU verifier")
client.add_verifier(attestation.Devices.GPU, attestation.Environment.TEST, "https://foo.com", my_evidence_policy)
print(client.get_verifiers())

print("attest")
client.attest()

print("try to get_token() - should get token")
t = client.get_token()
print("my token is : " + t)

print("try to get token with \"\" - should be nothing")
print("my token is : " + client.get_token(""))

print("validate_token testing. currently token is " + t)
print("call validate_token() - expecting True")
print(client.validate_token(""))
print("call validate_token(\"\") - expecting False")
print(client.validate_token("", ""))
print("call validate_token(\"foo\") - expecting False")
print(client.validate_token("", "foo"))
# NOTE: fixed the "(<token<)" typo and capitalized "True" for consistency.
print("call validate_token(<token>) - expecting True")
print(client.validate_token("", t))
print("call validate_token([]) - expecting False (it's a list)")
print(client.validate_token("", []))
print("call validate_token({}) - expecting False (it's an empty dict)")
print(client.validate_token("", {}))

print("before setting server - expecting null")
print(client.get_nonce())
print("set nonce")
print(client.set_nonce("0xdeadbeef"))
print("getting nonce - expecting deadbeef")
print(client.get_nonce())
# NOTE: fixed the "differeng" typo.
print("generating nonce - expecting something completely different")
print(client._generate_nonce())
print("setting nonce server - good luck with that")
client.set_nonce_server("https://foo.com/nonce")
print("after setting server")
print(client._generate_nonce())
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/AttestationTest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation

# Smoke test: run local GPU attestation end-to-end through the SDK singleton.
client = attestation.Attestation()
client.set_name("thisNode1")
print ("[SmallGPUTest] node name :", client.get_name())
# The LOCAL GPU verifier needs no URL or evidence policy.
client.add_verifier(attestation.Devices.GPU, attestation.Environment.LOCAL, "", "")
# Results policy: every claim produced by the local GPU verifier must be true
# for validate_token() to succeed.
attestation_results_policy = '{"version":"1.0","authorization-rules":{"x-nv-gpu-available":true,' \
'"x-nv-gpu-attestation-report-available":true,"x-nv-gpu-info-fetched":true,' \
'"x-nv-gpu-arch-check":true,"x-nv-gpu-root-cert-available":true,' \
'"x-nv-gpu-cert-chain-verified":true,"x-nv-gpu-ocsp-cert-chain-verified":true,' \
'"x-nv-gpu-ocsp-signature-verified":true,"x-nv-gpu-cert-ocsp-nonce-match":true,' \
'"x-nv-gpu-cert-check-complete":true,"x-nv-gpu-measurement-available":true,' \
'"x-nv-gpu-attestation-report-parsed":true,"x-nv-gpu-nonce-match":true,' \
'"x-nv-gpu-attestation-report-driver-version-match":true,' \
'"x-nv-gpu-attestation-report-vbios-version-match":true,' \
'"x-nv-gpu-attestation-report-verified":true,"x-nv-gpu-driver-rim-schema-fetched":true,' \
'"x-nv-gpu-driver-rim-schema-validated":true,"x-nv-gpu-driver-rim-cert-extracted":true,' \
'"x-nv-gpu-driver-rim-signature-verified":true,' \
'"x-nv-gpu-driver-rim-driver-measurements-available":true,' \
'"x-nv-gpu-driver-vbios-rim-fetched":true,"x-nv-gpu-vbios-rim-schema-validated":true,' \
'"x-nv-gpu-vbios-rim-cert-extracted":true,"x-nv-gpu-vbios-rim-signature-verified":true,' \
'"x-nv-gpu-vbios-rim-driver-measurements-available":true,' \
'"x-nv-gpu-vbios-index-conflict":true,"x-nv-gpu-measurements-match":true}}'
print(client.get_verifiers())
print ("[SmallGPUTest] call attest() - expecting True")
print(client.attest())
print ("[SmallGPUTest] token : "+str(client.get_token()))
print ("[SmallGPUTest] call validate_token() - expecting True")
print(client.validate_token(attestation_results_policy))
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/SmallGPUTest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation

# Combined smoke test: attest both a local GPU verifier and a test CPU verifier
# through the shared Attestation singleton, then validate the combined token.
client = attestation.Attestation("thisNode44")
print("node name :", client.get_name())
client.add_verifier(attestation.Devices.GPU, attestation.Environment.LOCAL, "", "")
print(client.get_verifiers())
client.add_verifier(attestation.Devices.CPU, attestation.Environment.TEST, "", "")
print(client.get_verifiers())
print("call attest() - expecting True")
print(client.attest())
t = client.get_token()
print ("my token is : "+t)
print("call validate_token() - expecting True")
# GPU results policy: all local-GPU claims must be true for validation to pass.
attestation_results_policy = '{"version":"1.0","authorization-rules":{"x-nv-gpu-available":true,"x-nv-gpu-attestation-report-available":true,"x-nv-gpu-info-fetched":true,"x-nv-gpu-arch-check":true,"x-nv-gpu-root-cert-available":true,"x-nv-gpu-cert-chain-verified":true,"x-nv-gpu-ocsp-cert-chain-verified":true,"x-nv-gpu-ocsp-signature-verified":true,"x-nv-gpu-cert-ocsp-nonce-match":true,"x-nv-gpu-cert-check-complete":true,"x-nv-gpu-measurement-available":true,"x-nv-gpu-attestation-report-parsed":true,"x-nv-gpu-nonce-match":true,"x-nv-gpu-attestation-report-driver-version-match":true,"x-nv-gpu-attestation-report-vbios-version-match":true,"x-nv-gpu-attestation-report-verified":true,"x-nv-gpu-driver-rim-schema-fetched":true,"x-nv-gpu-driver-rim-schema-validated":true,"x-nv-gpu-driver-rim-cert-extracted":true,"x-nv-gpu-driver-rim-signature-verified":true,"x-nv-gpu-driver-rim-driver-measurements-available":true,"x-nv-gpu-driver-vbios-rim-fetched":true,"x-nv-gpu-vbios-rim-schema-validated":true,"x-nv-gpu-vbios-rim-cert-extracted":true,"x-nv-gpu-vbios-rim-signature-verified":true,"x-nv-gpu-vbios-rim-driver-measurements-available":true,"x-nv-gpu-vbios-index-conflict":true,"x-nv-gpu-measurements-match":true}}'
print(client.validate_token(attestation_results_policy, t))
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/SmallCombinedTest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import sys
import os

# Make the package under ../src importable when running this test straight from
# the source tree (without installing the SDK).
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')

import src.nv_attestation_sdk.attestation as attestation
# from nv_attestation_sdk import *
# location of src

## testing
client = attestation.Attestation()
client.set_name("thisNode1")
print("node name :", client.get_name())

#client.add_verifier(attestation.Devices.CPU, attestation.Environment.LOCAL, "https://foo.com")
# BUG FIX: add_verifier(dev, env, url, evidence) takes four arguments; the
# original call passed a fifth "" which raises TypeError at runtime.
client.add_verifier(attestation.Devices.GPU, attestation.Environment.LOCAL, "", "")
client.attest()
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/Test1.py
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/__init__.py
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from nv_attestation_sdk import attestation

# Smoke test for the TEST ("faux") CPU verifier: no URL or policy is required,
# and validate_token is called with an empty policy string.
client = attestation.Attestation("have a nice day")
print("node name :", client.get_name())
client.add_verifier(attestation.Devices.CPU, attestation.Environment.TEST, "", "")
print(client.get_verifiers())
print("call attest() - expecting True")
print(client.attest())
print ("my token is : "+str(client.get_token()))
print("call validate_token() - expecting True")
print(client.validate_token(""))
|
nvtrust-main
|
guest_tools/attestation_sdk/tests/SmallFauxTest.py
|
nvtrust-main
|
guest_tools/attestation_sdk/src/__init__.py
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
from enum import IntFlag
from enum import IntEnum
from datetime import datetime
from nv_attestation_sdk.gpu import attest_gpu
import secrets
import jwt
import json
class Devices(IntFlag):
    """Bit flags identifying the device kinds a verifier can attest.

    Values are powers of two so multiple devices can be OR-ed together.
    """
    CPU = 1
    GPU = 2
    NIC = 4
    OS = 8
    DPU = 16
class Environment(IntEnum):
    """Enumerates the environments in which a verifier can run."""
    TEST = 1
    LOCAL = 2
    AZURE = 3
    GCP = 4
class VerifierFields(IntEnum):
    """Positional indices into a verifier entry list (see add_verifier)."""
    NAME = 0
    DEVICE = 1
    ENVIRONMENT = 2
    URL = 3
    POLICY = 4
    JWT_TOKEN = 5
class Attestation(object):
_instance = None
def __new__(cls, name=None):
if cls._instance is None:
cls._instance = super(Attestation, cls).__new__(cls)
if isinstance(name,str):
cls._name = name
else:
cls._name = ""
cls._nonceServer = ""
cls._staticNonce = ""
cls._verifiers = []
cls._tokens = {}
return cls._instance
@classmethod
def set_name(cls, name: str) -> None:
cls._name = name
@classmethod
def get_name(cls) -> str:
return cls._name
@classmethod
def set_nonce_server(cls, url: str) -> None:
cls._nonceServer = url
@classmethod
def get_nonce_server(cls) -> str:
return cls._nonceServer
@classmethod
def add_verifier(cls, dev: Devices, env: Environment, url: str, evidence: str) -> None:
"""
Add a new verifier to SDK configuration.
This will be used during attest and validate_token methods.
"""
if (dev == Devices.GPU and env == Environment.LOCAL) :
name = "LOCAL_GPU_CLAIMS"
elif (dev == Devices.CPU and env == Environment.TEST) :
name = "TEST_CPU_CLAIMS"
else :
name = "UNKNOWN_CLAIMS"
lst = [name, dev, env, url, evidence, ""]
cls._verifiers.append(lst)
@classmethod
def get_verifiers(cls) -> list:
"""
Get the list of configured verifiers.
"""
return cls._verifiers
@classmethod
def attest(cls) -> bool:
"""
Attest the client as per the configured verifiers and evidence policy
"""
# this should consist of doing the following things
# Nonce _generateNonce()
# Evidence generateEvidence(nonce)
# Retrieve quote from vTPM (locally)
# Token verifyEvidence(evidence)
# Evidence -> verifier, validated against policy, returns token
# Status provideEvidence(token)
# Token -> relying party, returns Status
# cls.token = ""
for verifier in cls._verifiers:
attest_result = True
if verifier[VerifierFields.DEVICE] == Devices.GPU and verifier[VerifierFields.ENVIRONMENT] == Environment.LOCAL:
this_result, jwt_token = attest_gpu.attest_gpu_local()
# save the token with the verifier
verifier[VerifierFields.JWT_TOKEN] = jwt_token
attest_result = attest_result and this_result
elif verifier[VerifierFields.DEVICE] == Devices.CPU and verifier[VerifierFields.ENVIRONMENT] == Environment.TEST:
report = {}
report["rand"] = secrets.token_hex(16)
report["hash"] = str(hash(report["rand"]))
jwt_token = jwt.encode(report, "notasecret", algorithm="HS256")
# save the token with the verifier
verifier[VerifierFields.JWT_TOKEN] = jwt_token
attest_result = attest_result and True
else:
# probably should throw an exception here
print("unknown verifier - assuming all is good - device is " + str(verifier[VerifierFields.DEVICE]) + " env is "+str(verifier[VerifierFields.ENVIRONMENT]))
# NOTE: no verifiers means attestation will be true. weird but makes some sense
# NOTE: THIS is where the tokens should be combined in to a single token and then set
#print("full attest_result ... ", attest_result) # NOTE: put a try catch here
eatToken = cls._create_EAT()
cls.set_token( cls._name, eatToken)
return attest_result
@classmethod
def _create_EAT(cls) -> str:
#
# What is an EAT
#
# An EAT is a list with two elements let's call them A and B
# element A is a list where the first element is "JWT" and the second element is a JWT Token
# element B is a dictionary of claims where each element is indexed by a name and the value
# is a JWT Token of the claims attested for said name
# or at least that is what the spec suggests
# JWT has a very different idea and wants just a dictionarey object.
#
# Therefore a JSON-encoded Detached EAT bundle is defined as
# {
# "JWT" : JWT of main claims
# zero or more of the following
# "verifier name" : JWT of this verifier
# }
issuer = "NV-Attestation-SDK"
curr_dt = datetime.now()
timestamp = int(round(curr_dt.timestamp()))
payload = { "iss" : issuer, "iat" : timestamp, "exp": None }
encoded_jwt = jwt.encode ( payload, "notasecret", algorithm="HS256")
eat = []
eat_inner = ["JWT",encoded_jwt]
verifier_claims = {}
for verifier in cls._verifiers:
if verifier[VerifierFields.JWT_TOKEN] != "":
verifier_claims[ verifier[VerifierFields.NAME] ] = verifier[VerifierFields.JWT_TOKEN]
eat.append (eat_inner)
eat.append (verifier_claims)
return json.dumps(eat)
@classmethod
def set_token(cls, name: str, eat_token: str) -> None:
entry = {name: eat_token}
cls._tokens.update(entry)
@classmethod
def get_token(cls, x=None) -> str:
name = ""
if x == None:
name = cls.get_name()
elif isinstance(x, str):
name = x
if name == "":
return ""
if name in cls._tokens.keys():
return cls._tokens[name]
else:
return ""
@classmethod
def _validate_token_internal(cls, policy:str, eat_token: str) -> bool:
attest_result = True
if eat_token == "":
return False
else:
try:
eat = json.loads(eat_token)
except json.decoder.JSONDecodeError:
return False
eat_jwt = eat[0]
eat_claims = eat[1]
for verifier_name in eat_claims:
jwt_token = eat_claims[verifier_name]
if verifier_name == "LOCAL_GPU_CLAIMS":
this_result = attest_gpu.validate_gpu_token(jwt_token, policy)
elif verifier_name == "TEST_CPU_CLAIMS":
claims = jwt.decode( jwt_token, "notasecret", algorithms="HS256")
randStr = claims["rand"]
hashStr = claims["hash"]
if hashStr == str(hash(randStr)):
this_result = True
else:
this_result = False
else:
#Unknown verifier - assume it's OK
this_result = True
attest_result = this_result and attest_result
return attest_result
@classmethod
def validate_token(cls, policy:str , x=None) :
if x == None:
name = cls.get_name()
if name == "":
return False
else:
if name in cls._tokens.keys():
token = cls._tokens[name]
else:
return False
return cls._validate_token_internal(policy, token)
elif isinstance(x,str):
if x == "":
return False
else:
return cls._validate_token_internal(policy, x)
elif isinstance(x,list):
return False
# this part could use some bullet proofing
elif isinstance(x,dict):
retdict = {}
for name in x:
if (name != ""):
token = x[name]
if (token != ""):
retdict[name] = cls._validate_token_internal(token)
else:
retdict[name] = False
return retdict
else:
return False
@classmethod
def _generate_nonce(cls) -> str:
# Check for the nonce server AND the name. If one is missing, generate a local nonce
if cls._nonceServer != "" and cls._name != "" :
# probably should only do this if name and url are non-null
# make call to url to get nonce
return "0xdeadbeefdeadbeefdeadbeefdeadbeef"
else:
# create nonce locally - 256 bits total
nonceStr = "0x" + secrets.token_hex(16)
return nonceStr
@classmethod
def get_nonce(cls) -> str:
return cls._staticNonce
@classmethod
def set_nonce(cls, nonce:str) :
cls._staticNonce = nonce
# ---- end of file: guest_tools/attestation_sdk/src/nv_attestation_sdk/attestation.py (repo: nvtrust-main) ----
# ---- file: guest_tools/attestation_sdk/src/nv_attestation_sdk/gpu/__init__.py (repo: nvtrust-main) — empty ----
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
import json
import jwt
from verifier import cc_admin
def validate_gpu_token(gpu_token: str, policy: str):
    """Appraise a GPU attestation JWT against a JSON appraisal policy.

    Returns True only when every claim listed under the policy's
    'authorization-rules' key is present in the token with an equal
    value.  An empty token or policy fails immediately.
    """
    if policy == "" or gpu_token == "":
        return False
    policy_obj = json.loads(policy)
    # Appraisal is claim-by-claim, not cryptographic: signature
    # verification is intentionally disabled (the token was produced
    # in-process by the local verifier).  PyJWT 2.x removed the old
    # verify= keyword, so use options= and pass algorithms as a list —
    # both forms are accepted by PyJWT 1.x and 2.x.
    gpu_token_obj = jwt.decode(gpu_token,
                               algorithms=["HS256"],
                               options={"verify_signature": False},
                               key="secret")
    auth_rules = policy_obj['authorization-rules']
    for key in auth_rules:
        if not (key in gpu_token_obj and gpu_token_obj[key] == auth_rules[key]):
            print("\t[ERROR] Invalid token. Authorized claims does not match the appraisal policy: ", key)
            return False
    return True
def attest_gpu_local():
    """Attest the local GPU through verifier.cc_admin.

    Returns an (attestation_result, jwt_token) pair; on any failure the
    result is False and the token carries an error claim instead of the
    real attestation claims.
    """
    result = False
    token = ""
    options = {
        "verbose": False,
        "test_no_gpu": False,
        "driver_rim": "/usr/share/nvidia/rim/RIM_GH100PROD.swidtag",
        "vbios_rim": None,
        "user_mode": True,
    }
    try:
        result, token = cc_admin.attest(options)
    except Exception as e:
        print("\tException: ", e)
        # Hand back a token recording the failure instead of raising.
        token = jwt.encode({'x-nv-err-message': "GPU_ATTESTATION_ERR"},
                           'secret',
                           "HS256")
    return result, token
# ---- end of file: guest_tools/attestation_sdk/src/nv_attestation_sdk/gpu/attest_gpu.py (repo: nvtrust-main) ----
#!/usr/bin/env python3
#
# Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from __future__ import print_function
from enum import Enum
import os
import mmap
import struct
from struct import Struct
import time
import sys
import random
import optparse
import traceback
from logging import debug, info, warning, error
import logging
from collections import namedtuple
import zlib
from pathlib import Path
#import debugpy
import ctypes
# Short aliases for the ctypes fixed-width integer types used below.
c_uint8 = ctypes.c_uint8
c_uint16 = ctypes.c_uint16
c_uint32 = ctypes.c_uint32
# Use SystemRandom() as the default rng, --weak-random option can switch it
# back to the default.
rng = random.SystemRandom()
# perf_counter was added in Python 3.3; fall back to time.time on
# very old interpreters.
if hasattr(time, "perf_counter"):
    perf_counter = time.perf_counter
else:
    perf_counter = time.time
import platform
is_windows = platform.system() == "Windows"
is_linux = platform.system() == "Linux"
# sysfs-based PCI discovery is only available on Linux.
is_sysfs_available = is_linux
use_nvpex = False
# By default use /dev/mem for MMIO, can be changed with --mmio-access-type sysfs
mmio_access_type = "devmem"
VERSION = "535.86.06"
# BAR0 aperture sizes used when mapping device MMIO.
GPU_BAR0_SIZE = 16 * 1024 * 1024
NVSWITCH_BAR0_SIZE = 32 * 1024 * 1024
# BAR0 register offsets (per the register names; consumed elsewhere in
# this tool).
NV_PMC_ENABLE = 0x200
NV_PMC_DEVICE_ENABLE = 0x600
NV_PMC_BOOT_0 = 0x0
NV_PROM_DATA = 0x300000
def NV_PPWR_NPU_IMEMD(i):
    """BAR0 offset of the i-th NPU IMEM data register (16-byte stride)."""
    base = 0x10a184
    return base + 16 * i
def NV_PPWR_NPU_IMEMC(i):
    """BAR0 offset of the i-th NPU IMEM control register (16-byte stride)."""
    base = 0x10a180
    return base + 16 * i
# IMEMC control bits (named per the hardware fields: auto-increment
# write/read addressing and secure-load enable).
NV_PPWR_NPU_IMEMC_AINCW_TRUE = 1 << 24
NV_PPWR_NPU_IMEMC_AINCR_TRUE = 1 << 25
NV_PPWR_NPU_IMEMC_SECURE_ENABLED = 1 << 28
def NV_PPWR_NPU_IMEMT(i):
    """BAR0 offset of the i-th NPU IMEM tag register (16-byte stride)."""
    base = 0x10a188
    return base + 16 * i
def NV_PPWR_NPU_DMEMC(i):
    """BAR0 offset of the i-th NPU DMEM control register (8-byte stride)."""
    base = 0x0010a1c0
    return base + 8 * i
# Additional NPU registers: CPU control and hardware configuration
# (consumed elsewhere in this tool).
NV_PPWR_NPU_CPUCTL = 0x10a100
NV_PPWR_NPU_HWCFG = 0x10a108
NV_PPWR_NPU_HWCFG1 = 0x10a12c
# Root of the sysfs PCI device tree.
SYS_DEVICES = "/sys/bus/pci/devices/"

def sysfs_find_parent(device):
    """Return the sysfs path of the PCI device that has *device* as a
    direct child, or None when no parent is found."""
    child = os.path.basename(device)
    for entry in os.listdir(SYS_DEVICES):
        candidate = os.path.join(SYS_DEVICES, entry)
        if child in os.listdir(candidate):
            return candidate
    return None
def find_gpus_sysfs(bdf_pattern=None):
    """Scan /sys/bus/pci/devices for NVIDIA devices.

    bdf_pattern optionally restricts the scan to BDFs containing that
    substring.  Returns (gpus, other): wrapper objects for recognized
    devices (including BrokenGpu placeholders for ones that failed to
    initialize) and generic NvidiaDevice placeholders for unknown ones.
    """
    gpus = []
    other = []
    dev_paths = []
    # Pass 1: collect candidate device paths.
    for device_dir in os.listdir("/sys/bus/pci/devices/"):
        dev_path = os.path.join("/sys/bus/pci/devices/", device_dir)
        bdf = device_dir
        if bdf_pattern:
            if bdf_pattern not in bdf:
                continue
        # Only NVIDIA devices (PCI vendor 0x10de).
        vendor = open(os.path.join(dev_path, "vendor")).readlines()
        vendor = vendor[0].strip()
        if vendor != "0x10de":
            continue
        # Keep display controllers (0x030000 VGA / 0x030200 3D) and class
        # 0x068000 (handled below as NvSwitch).
        cls = open(os.path.join(dev_path, "class")).readlines()
        cls = cls[0].strip()
        if cls != "0x030000" and cls != "0x030200" and cls != "0x068000":
            continue
        dev_paths.append(dev_path)
    def devpath_to_id(dev_path):
        # Sort key: the BDF digits interpreted as one hex number.
        bdf = os.path.basename(dev_path)
        return int(bdf.replace(":","").replace(".",""), base=16)
    dev_paths = sorted(dev_paths, key=devpath_to_id)
    # Pass 2: instantiate a wrapper for each candidate.
    for dev_path in dev_paths:
        gpu = None
        cls = open(os.path.join(dev_path, "class")).readlines()
        cls = cls[0].strip()
        try:
            if cls == "0x068000":
                dev = NvSwitch(dev_path=dev_path)
            else:
                dev = Gpu(dev_path=dev_path)
        except UnknownGpuError as err:
            # Recognized vendor but unknown model: keep as a generic device.
            error("Unknown Nvidia device %s: %s", dev_path, str(err))
            dev = NvidiaDevice(dev_path=dev_path)
            other.append(dev)
            continue
        except Exception as err:
            # Initialization failed outright; record a broken placeholder.
            _, _, tb = sys.exc_info()
            traceback.print_tb(tb)
            error("GPU %s broken: %s", dev_path, str(err))
            dev = BrokenGpu(dev_path=dev_path)
        gpus.append(dev)
    return (gpus, other)
# Shared NvPex backend handle, set up elsewhere when --use-nvpex is active.
global_nvpex = None

def find_gpus(bdf=None):
    """Enumerate NVIDIA devices — via sysfs when available; otherwise a
    single explicit BDF is required."""
    if not is_sysfs_available:
        assert bdf != None
        return find_gpus_one_bdf_only(bdf)
    return find_gpus_sysfs(bdf)
def _struct_fmt(size):
if size == 1:
return "B"
elif size == 2:
return "=H"
elif size == 4:
return "=I"
elif size == 8:
return "=Q"
else:
assert 0, "Unhandled size %d" % size
def ints_from_data(data, size):
    """Split *data* into consecutive *size*-byte little-endian integers."""
    fmt = _struct_fmt(size)
    # Wrap data in bytes() for python 2.6 compatibility
    data = bytes(data)
    return [struct.unpack(fmt, data[pos : pos + size])[0]
            for pos in range(0, len(data), size)]
def int_from_data(data, size):
    """Decode one *size*-byte integer from *data*."""
    # bytes() wrap kept for python 2.6 compatibility.
    return struct.unpack(_struct_fmt(size), bytes(data))[0]
def data_from_int(integer, size):
    """Encode *integer* into *size* bytes."""
    return struct.pack(_struct_fmt(size), integer)
class FileRaw(object):
    """Register access through plain read()/write() syscalls on a file
    (used e.g. for PCI config space)."""

    def __init__(self, path, offset, size):
        self.fd = os.open(path, os.O_RDWR | os.O_SYNC)
        # NOTE(review): base_offset is stored but reads/writes seek from
        # absolute 0; callers currently always pass 0 — confirm intended.
        self.base_offset = offset
        self.size = size

    def __del__(self):
        # __init__ may have failed before self.fd was assigned.
        if hasattr(self, "fd"):
            os.close(self.fd)

    def write(self, offset, data, size):
        """Write *data* as a *size*-byte integer at *offset*."""
        os.lseek(self.fd, offset, os.SEEK_SET)
        os.write(self.fd, data_from_int(data, size))

    def write8(self, offset, data):
        self.write(offset, data, 1)

    def write16(self, offset, data):
        self.write(offset, data, 2)

    def write32(self, offset, data):
        self.write(offset, data, 4)

    def read(self, offset, size):
        """Read a *size*-byte integer from *offset*."""
        os.lseek(self.fd, offset, os.SEEK_SET)
        data = os.read(self.fd, size)
        assert data, "offset %s size %d %s" % (hex(offset), size, data)
        return int_from_data(data, size)

    def read8(self, offset):
        return self.read(offset, 1)

    def read16(self, offset):
        return self.read(offset, 2)

    def read32(self, offset):
        return self.read(offset, 4)

    def read_format(self, fmt, offset):
        """Read and unpack struct format *fmt* starting at *offset*."""
        os.lseek(self.fd, offset, os.SEEK_SET)
        return struct.unpack(fmt, os.read(self.fd, struct.calcsize(fmt)))
class FileMap(object):
    """Memory-mapped file access used for device MMIO."""
    # At least with python 2.7 and 3.4, writing to an mmap.mmap() region (or
    # ctypes pointer for it) results in the writes being duplicated which
    # breaks GPU MMIO semantics. Creating a numpy array WARs this issue. The
    # suspicion is that without numpy, memcpy() is used for the writes and its
    # optimization results in the writes being duplicated. On python 2.6 this
    # has not been observed so far and the numpy wrapper is skipped to lessen
    # the support requirements.
    use_numpy = sys.version_info[0] != 2 or sys.version_info[1] != 6

    def __init__(self, path, offset, size):
        self.size = size
        with open(path, "r+b") as f:
            prot = mmap.PROT_READ | mmap.PROT_WRITE
            # Try mmap.mmap() first for error checking even if we end up using numpy
            mapped = mmap.mmap(f.fileno(), size, mmap.MAP_SHARED, prot, offset=offset)
            if self.__class__.use_numpy:
                import ctypes
                import numpy
                # Re-map via libc so the raw pointer can be wrapped as
                # numpy arrays (see the class comment above for why).
                mapped = libc.mmap(ctypes.c_void_p(None), ctypes.c_size_t(size), ctypes.c_int(prot),
                                   ctypes.c_int(mmap.MAP_SHARED), ctypes.c_int(f.fileno()), ctypes.c_long(offset))
                # 0xffff...ffff is MAP_FAILED, i.e. (void *)-1.
                # GpuError is defined elsewhere in this module.
                if mapped == 0xffffffffffffffff:
                    raise GpuError("Can't mmap '{0}'".format(path))
                self.mapped = mapped
                self.map_8 = ctypes.cast(mapped, ctypes.POINTER(ctypes.c_uint8))
                self.map_16 = ctypes.cast(mapped, ctypes.POINTER(ctypes.c_uint16))
                self.map_32 = ctypes.cast(mapped, ctypes.POINTER(ctypes.c_uint32))
                self.map_8 = numpy.ctypeslib.as_array(self.map_8, shape=(size,))
                self.map_16 = numpy.ctypeslib.as_array(self.map_16, shape=(size//2,))
                self.map_32 = numpy.ctypeslib.as_array(self.map_32, shape=(size//4,))
            else:
                self.mapped = mapped

    def __del__(self):
        # Only the libc-created mapping needs an explicit munmap.
        if self.__class__.use_numpy:
            if hasattr(self, "mapped"):
                libc.munmap(self.mapped, ctypes.c_size_t(self.size))

    # Accessors are selected once, at class-definition time, based on
    # whether the numpy workaround is in effect.
    if use_numpy:
        def write32(self, offset, data):
            self.map_32[offset // 4] = data
        def write16(self, offset, data):
            self.map_16[offset // 2] = data
        def write8(self, offset, data):
            self.map_8[offset // 1] = data
    else:
        def write32(self, offset, data):
            self.mapped[offset : offset + 4] = struct.pack("=I", data)

    if use_numpy:
        def read(self, offset, size):
            if size == 1:
                return self.map_8[offset // 1]
            elif size == 2:
                return self.map_16[offset // 2]
            elif size == 4:
                return self.map_32[offset // 4]
            else:
                assert 0, "Unhandled size %d" % size
    else:
        def read(self, offset, size):
            fmt = _struct_fmt(size)
            return struct.unpack(fmt, self.mapped[offset : offset + size])[0]

    def read8(self, offset):
        return self.read(offset, 1)

    def read16(self, offset):
        return self.read(offset, 2)

    def read32(self, offset):
        return self.read(offset, 4)
# Check that modules needed to access devices on the system are available
def check_device_module_deps():
    """Import-check optional dependencies (numpy for the mmap-based MMIO
    workaround); raises ImportError early if they are missing."""
    needs_numpy = not use_nvpex and FileMap.use_numpy
    if needs_numpy:
        import numpy
class NvPexError(Exception):
    """Error type for the nvpex access path (raised elsewhere in this module)."""
    pass
# Architecture names recognized by this tool.
GPU_ARCHES = ["kepler", "maxwell", "pascal", "volta", "turing", "ampere", "ada", "hopper"]
# For architectures with multiple products, match by device id as well. The
# values from this map are what's used in the GPU_MAP.
GPU_MAP_MULTIPLE = {
    # Keyed by the chip identification value (presumably NV_PMC_BOOT_0 —
    # TODO confirm against the lookup site).
    0x180000a1: {
        "devids": {
            0x2330: "H100-SXM",
            0x2336: "H100-SXM",
        },
        # Any other device id with this chip id is treated as the PCIe SKU.
        "default": "H100-PCIE",
    },
}
# Per-product capability/configuration table consumed by the device
# wrapper classes elsewhere in this tool.
GPU_MAP = {
    "H100-PCIE": {
        "name": "H100-PCIE",
        "arch": "hopper",
        "pmu_reset_in_pmc": False,
        "memory_clear_supported": True,
        "forcing_ecc_on_after_reset_supported": True,
        "nvdec": [],
        "nvenc": [],
        "other_npus": ["fsp"],
        # NVLink register layout: 18 links in groups of 6.
        "nvlink": {
            "number": 18,
            "links_per_group": 6,
            "base_offset": 0xa00000,
            "per_group_offset": 0x40000,
        },
        "needs_npus_cfg": False,
    },
    "H100-SXM": {
        "name": "H100-SXM",
        "arch": "hopper",
        "pmu_reset_in_pmc": False,
        "memory_clear_supported": True,
        "forcing_ecc_on_after_reset_supported": True,
        "nvdec": [],
        "nvenc": [],
        "other_npus": ["fsp"],
        "nvlink": {
            "number": 18,
            "links_per_group": 6,
            "base_offset": 0xa00000,
            "per_group_offset": 0x40000,
        },
        "needs_npus_cfg": False,
    },
}
# Standard PCI / PCI Express configuration-space constants (capability
# IDs and register offsets within a capability block).
PCI_CFG_SPACE_SIZE = 256
PCI_CFG_SPACE_EXP_SIZE = 4096
PCI_CAPABILITY_LIST = 0x34
# PCI Express
PCI_CAP_ID_EXP = 0x10
# Power management
PCI_CAP_ID_PM = 0x01
CAP_ID_MASK = 0xff
# Advanced Error Reporting
PCI_EXT_CAP_ID_ERR = 0x01
# SRIOV
PCI_EXT_CAP_ID_SRIOV = 0x10
# Uncorrectable Error Status
PCI_ERR_UNCOR_STATUS = 4
# Uncorrectable Error Mask
PCI_ERR_UNCOR_MASK = 8
# Uncorrectable Error Severity
PCI_ERR_UNCOR_SEVER = 12
# Use libc's ffs() on Linux and fall back to a native implementation otherwise.
# ffs(n) returns the 1-based index of the least significant set bit (0 for n == 0).
if is_linux:
    import ctypes
    libc = ctypes.cdll.LoadLibrary('libc.so.6')
    # Set the mmap and munmap arg and return types.
    # last mmap arg is off_t which ctypes doesn't have. Assume it's long as that what gcc defines it to.
    libc.mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_long]
    libc.mmap.restype = ctypes.c_void_p
    libc.munmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
    libc.munmap.restype = ctypes.c_int
    def ffs(n):
        return libc.ffs(n)
else:
    # Pure-Python equivalent: isolate the lowest set bit, then take its
    # bit length (e.g. 0b1000 -> 4).
    def ffs(n):
        return (n & (-n)).bit_length()
class Bitfield(object):
    """Wrapper around bitfields, see PciUncorrectableErrors for an example.

    Subclasses populate `fields`, mapping each field name either to a raw
    bit mask (int) or to an inclusive (high_bit, low_bit) tuple.
    """
    fields = {}

    def __init__(self, raw, name=None):
        self.raw = raw
        self.name = self.__class__.__name__ if name is None else name

    def _mask_of(self, field):
        # Normalize the field spec to a bit mask.
        spec = self.__class__.fields[field]
        if isinstance(spec, int):
            return spec
        assert isinstance(spec, tuple)
        high_bit, low_bit = spec
        return ((1 << (high_bit - low_bit + 1)) - 1) << low_bit

    def _shift_of(self, field):
        mask = self._mask_of(field)
        assert mask != 0
        return ffs(mask) - 1

    def __getitem__(self, field):
        return (self.raw & self._mask_of(field)) >> self._shift_of(field)

    def __setitem__(self, field, val):
        mask = self._mask_of(field)
        val = val << self._shift_of(field)
        assert (val & ~mask) == 0, "value 0x%x mask 0x%x" % (val, mask)
        self.raw = (self.raw & ~mask) | val

    def __str__(self):
        return self.name + " " + str(self.values()) + " raw " + hex(self.raw)

    def values(self):
        """Decode every declared field from the raw value."""
        return {f: self[f] for f in self.__class__.fields}

    def non_zero(self):
        """Like values(), but keeping only non-zero fields."""
        return {k: v for k, v in self.values().items() if v != 0}

    def non_zero_fields(self):
        """Names of the fields whose decoded value is non-zero."""
        return [k for k, v in self.values().items() if v != 0]
class PciUncorrectableErrors(Bitfield):
    """AER Uncorrectable Error register layout (shared by the Status,
    Mask, and Severity registers)."""
    size = 4
    fields = {
        # Undefined
        "UND": 0x00000001,
        # Data Link Protocol
        "DLP": 0x00000010,
        # Surprise Down
        "SURPDN": 0x00000020,
        # Poisoned TLP
        "POISON_TLP": 0x00001000,
        # Flow Control Protocol
        "FCP": 0x00002000,
        # Completion Timeout
        "COMP_TIME": 0x00004000,
        # Completer Abort
        "COMP_ABORT": 0x00008000,
        # Unexpected Completion
        "UNX_COMP": 0x00010000,
        # Receiver Overflow
        "RX_OVER": 0x00020000,
        # Malformed TLP
        "MALF_TLP": 0x00040000,
        # ECRC Error Status
        "ECRC": 0x00080000,
        # Unsupported Request
        "UNSUP": 0x00100000,
        # ACS Violation
        "ACSV": 0x00200000,
        # internal error
        "INTN": 0x00400000,
        # MC blocked TLP
        "MCBTLP": 0x00800000,
        # Atomic egress blocked
        "ATOMEG": 0x01000000,
        # TLP prefix blocked
        "TLPPRE": 0x02000000,
    }
    def __str__(self):
        # Print only the non zero bits
        return "%s %s" % (self.name, str(self.non_zero_fields()))
# Offsets of Device Capabilities 2 / Device Control 2 within the PCIe capability.
PCI_EXP_DEVCAP2 = 36
PCI_EXP_DEVCTL2 = 40
class PciDevCtl2(Bitfield):
    """PCIe Device Control 2 register layout."""
    size = 2
    fields = {
        # Completion Timeout Value
        "COMP_TIMEOUT": 0x000f,
        # Completion Timeout Disable
        "COMP_TMOUT_DIS": 0x0010,
        # Alternative Routing-ID
        "ARI": 0x0020,
        # Set Atomic requests
        "ATOMIC_REQ": 0x0040,
        # Block atomic egress
        "ATOMIC_EGRESS_BLOCK": 0x0080,
        # Allow IDO for requests
        "IDO_REQ_EN": 0x0100,
        # Allow IDO for completions
        "IDO_CMP_EN": 0x0200,
        # Enable LTR mechanism
        "LTR_EN": 0x0400,
        # Enable OBFF Message type A
        "OBFF_MSGA_EN": 0x2000,
        # Enable OBFF Message type B
        "OBFF_MSGB_EN": 0x4000,
        # OBFF using WAKE# signaling
        "OBFF_WAKE_EN": 0x6000,
    }
# Access Control Services
PCI_EXT_CAP_ID_ACS = 0x0D
# ACS control
PCI_EXT_ACS_CTL = 6
class AcsCtl(Bitfield):
    """ACS Control register layout."""
    size = 2
    fields = {
        "SOURCE_VALIDATION": 0x0001,
        "TRANSLATION_BLOCKING": 0x0002,
        "P2P_REQUEST_REDIRECT": 0x0004,
        "P2P_COMPLETION_REDIRECT": 0x0008,
        "UPSTREAM_FORWARDING": 0x0010,
        "P2P_EGRESS_CONTROL": 0x0020,
        "DIRECT_TRANSLATED_P2P": 0x0040,
    }
# Downstream Port Containment
PCI_EXT_CAP_ID_DPC = 0x1D
# DPC control
PCI_EXP_DPC_CTL = 6
class DpcCtl(Bitfield):
    """DPC Control register layout."""
    size = 2
    fields = {
        # Enable trigger on ERR_FATAL message
        "EN_FATAL": 0x0001,
        # Enable trigger on ERR_NONFATAL message
        "EN_NONFATAL": 0x0002,
        # DPC Interrupt Enable
        "INT_EN": 0x0008,
    }
# DPC Status
PCI_EXP_DPC_STATUS = 8
class DpcStatus(Bitfield):
    """DPC Status register layout."""
    size = 2
    fields = {
        # Trigger Status
        "STATUS_TRIGGER": 0x0001,
        # Trigger Reason
        "STATUS_TRIGGER_RSN": 0x0006,
        # Interrupt Status
        "STATUS_INTERRUPT": 0x0008,
        # Root Port Busy
        "RP_BUSY": 0x0010,
        # Trig Reason Extension
        "STATUS_TRIGGER_RSN_EXT": 0x0060,
    }
class DeviceField(object):
    """Wrapper for a device register/setting defined by a bitfield class and
    accessible with dev.read()/write() at the specified offset"""

    def __init__(self, bitfield_class, dev, offset, name=None):
        self.dev = dev
        self.offset = offset
        self.bitfield_class = bitfield_class
        self.size = bitfield_class.size
        self.name = bitfield_class.__name__ if name is None else name
        self._read()

    def _read(self):
        # Refresh the cached bitfield from the device and return it.
        raw = self.dev.read(self.offset, self.size)
        self.value = self.bitfield_class(raw, name=self.name)
        return self.value

    def _write(self):
        self.dev.write(self.offset, self.value.raw, self.size)

    def __getitem__(self, field):
        return self._read()[field]

    def __setitem__(self, field, val):
        # Read-modify-write of the single field.
        self._read()
        self.value[field] = val
        self._write()

    def write_only(self, field, val):
        """Write to the device with only the field set as specified. Useful for W1C bits"""
        bf = self.bitfield_class(0)
        bf[field] = val
        self.dev.write(self.offset, bf.raw, self.size)
        self._read()

    def __str__(self):
        self._read()
        return str(self.value)
# PCI Command register (config-space offset 0x04).
PCI_COMMAND = 0x04
class PciCommand(Bitfield):
    """PCI Command register layout."""
    size = 2
    fields = {
        "MEMORY": 0x0002,
        "MASTER": 0x0004,
        "PARITY": 0x0040,
        "SERR": 0x0100,
    }
# PCIe Capabilities register offset within the PCIe capability.
PCI_EXP_FLAGS = 2
class PciExpFlags(Bitfield):
    """PCIe Capabilities register layout (version, port type, slot, IRQ)."""
    size = 2
    fields = {
        # Capability version
        "VERS": 0x000f,
        # Device/Port type
        "TYPE": 0x00f0,
        #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
        #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
        #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
        #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
        #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
        #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
        #define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
        #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
        #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
        # Slot implemented
        "SLOT": 0x0100,
        # Interrupt message number
        "IRQ": 0x3e00,
    }
# Root Control register offset within the PCIe capability.
PCI_EXP_RTCTL = 28
class PciRootControl(Bitfield):
    """PCIe Root Control register layout."""
    size = 2
    fields = {
        # System Error on Correctable Error
        "SECEE": 0x0001,
        # System Error on Non-Fatal Error
        "SENFEE": 0x0002,
        # System Error on Fatal Error
        "SEFEE": 0x0004,
        # PME Interrupt Enable
        "PMEIE": 0x0008,
        # CRS Software Visibility Enable
        "CRSSVE": 0x0010,
    }
# Device Capabilities register offset within the PCIe capability.
PCI_EXP_DEVCAP = 4
class PciDevCap(Bitfield):
    """PCIe Device Capabilities register layout."""
    size = 4
    fields = {
        # Max payload
        "PAYLOAD": 0x00000007,
        # Phantom functions
        "PHANTOM": 0x00000018,
        # Extended tags
        "EXT_TAG": 0x00000020,
        # L0s acceptable latency
        "L0S": 0x000001c0,
        # L1 acceptable latency
        "L1": 0x00000e00,
        # Attention Button Present
        "ATN_BUT": 0x00001000,
        # Attention indicator present
        "ATN_IND": 0x00002000,
        # Power indicator present
        "PWR_IND": 0x00004000,
        # Role-based error reporting
        "RBER": 0x00008000,
        # Slot power limit value
        "PWR_VAL": 0x03fc0000,
        # Slot Power Limit Scale
        "PWR_SCL": 0x0c000000,
        # Function level reset
        "FLR": 0x10000000,
    }
# Device Control register offset within the PCIe capability.
PCI_EXP_DEVCTL = 8
class PciDevCtl(Bitfield):
    """PCIe Device Control register layout."""
    size = 4
    fields = {
        # /* Correctable Error Reporting En. */
        "CERE": 0x0001,
        # /* Non-Fatal Error Reporting Enable */
        "NFERE": 0x0002,
        # /* Fatal Error Reporting Enable */
        "FERE": 0x0004,
        # /* Unsupported Request Reporting En. */
        "URRE": 0x0008,
        # /* Enable relaxed ordering */
        "RELAX_EN": 0x0010,
        # /* Max_Payload_Size */
        "PAYLOAD": 0x00e0,
        # /* Extended Tag Field Enable */
        "EXT_TAG": 0x0100,
        # /* Phantom Functions Enable */
        "PHANTOM": 0x0200,
        # /* Auxiliary Power PM Enable */
        "AUX_PME": 0x0400,
        # /* Enable No Snoop */
        "NOSNOOP_EN": 0x0800,
        # /* Max_Read_Request_Size */
        #"READRQ_128B 0x0000 /* 128 Bytes */
        #"READRQ_256B 0x1000 /* 256 Bytes */
        #"READRQ_512B 0x2000 /* 512 Bytes */
        #"READRQ_1024B 0x3000 /* 1024 Bytes */
        "READRQ": 0x7000,
        # /* Bridge Configuration Retry / FLR */
        "BCR_FLR": 0x8000,
    }
# Link Capabilities register offset within the PCIe capability.
PCI_EXP_LNKCAP = 12
class PciLinkCap(Bitfield):
    """PCIe Link Capabilities register layout."""
    size = 4
    fields = {
        # Maximum Link Width
        "MLW": 0x000003f0,
        # Surprise Down Error Reporting Capable
        "SDERC": 0x00080000,
        # Port Number
        "PN": 0xff000000,
    }
    def __str__(self):
        return "{ Link cap " + str(self.values()) + " raw " + hex(self.raw) + " }"
# Link Control
PCI_EXP_LNKCTL = 16
class PciLinkControl(Bitfield):
    """PCIe Link Control register layout."""
    size = 2
    fields = {
        # ASPM Control
        "ASPMC": 0x0003,
        # Read Completion Boundary
        "RCB": 0x0008,
        # Link Disable
        "LD": 0x0010,
        # Retrain Link
        "RL": 0x0020,
        # Common Clock Configuration
        "CCC": 0x0040,
        # Extended Synch
        "ES": 0x0080,
        # Hardware Autonomous Width Disable
        "HAWD": 0x0200,
        # Enable clkreq
        "CLKREQ_EN": 0x100,
        # Link Bandwidth Management Interrupt Enable
        "LBMIE": 0x0400,
        # Lnk Autonomous Bandwidth Interrupt Enable
        "LABIE": 0x0800,
    }
    def __str__(self):
        return "{ Link control " + str(self.values()) + " raw " + hex(self.raw) + " }"
# Link Status
PCI_EXP_LNKSTA = 18
class PciLinkStatus(Bitfield):
    """PCIe Link Status register layout."""
    size = 2
    fields = {
        # Current Link Speed
        # CLS_2_5GB 0x01 Current Link Speed 2.5GT/s
        # CLS_5_0GB 0x02 Current Link Speed 5.0GT/s
        "CLS": 0x000f,
        # Nogotiated Link Width
        "NLW": 0x03f0,
        # Link Training
        "LT": 0x0800,
        # Slot Clock Configuration
        "SLC": 0x1000,
        # Data Link Layer Link Active
        "DLLLA": 0x2000,
        # Link Bandwidth Management Status
        "LBMS": 0x4000,
        # Link Autonomous Bandwidth Status */
        "LABS": 0x8000,
    }
    def __str__(self):
        return "{ Link status " + str(self.values()) + " raw " + hex(self.raw) + " }"
# Slot Capabilities / Slot Control register offsets within the PCIe capability.
PCI_EXP_SLTCAP = 20
PCI_EXP_SLTCTL = 24
class PciSlotControl(Bitfield):
    """PCIe Slot Control register layout."""
    size = 2
    fields = {
        # Attention Button Pressed Enable
        "ABPE": 0x0001,
        # Power Fault Detected Enable
        "PFDE": 0x0002,
        # MRL Sensor Changed Enable
        "MRLSCE": 0x0004,
        # Presence Detect Changed Enable
        "PDCE": 0x0008,
        # Command Completed Interrupt Enable
        "CCIE": 0x0010,
        # Hot-Plug Interrupt Enable
        "HPIE": 0x0020,
        # Attention Indicator Control
        "AIC": 0x00c0,
        # Power Indicator Control
        "PIC": 0x0300,
        # Power Controller Control
        "PCC": 0x0400,
        # Electromechanical Interlock Control
        "EIC": 0x0800,
        # Data Link Layer State Changed Enable
        "DLLSCE": 0x1000,
    }
# Link Control 2 register offset within the PCIe capability.
PCI_EXP_LNKCTL2 = 48
class PciLinkControl2(Bitfield):
    """PCIe Link Control 2 register layout."""
    size = 2
    fields = {
        # Target link speed
        "TLS": 0x000f,
    }
# PM Control/Status register offset within the Power Management capability.
PCI_PM_CTRL = 4
class PciPmControl(Bitfield):
    """PCI Power Management Control/Status register layout."""
    size = 2
    fields = {
        "STATE": 0x0003,
        "NO_SOFT_RESET": 0x0008,
    }
# Cache of wrapper objects keyed by BDF (see PciDevice.find_or_init).
DEVICES = { }
class Device(object):
    """Base class for topology nodes.  The defaults describe a hidden
    placeholder device with no PCI capabilities; subclasses override the
    predicates they satisfy."""

    def __init__(self):
        self.parent = None
        self.children = []

    def is_hidden(self):
        return True

    def is_root(self):
        # A node with no parent is a root of the topology.
        return self.parent is None

    def is_bridge(self):
        return False

    def is_gpu(self):
        return False

    def is_nvswitch(self):
        return False

    def is_plx(self):
        return False

    def is_intel(self):
        return False

    def has_aer(self):
        return False

    def has_dpc(self):
        return False

    def has_acs(self):
        return False

    def has_exp(self):
        return False
class PciDevice(Device):
    @staticmethod
    def _open_config(dev_path):
        """Open the device's sysfs 'config' file for raw config-space access."""
        dev_path_config = os.path.join(dev_path, "config")
        return FileRaw(dev_path_config, 0, os.path.getsize(dev_path_config))
    @staticmethod
    def find_class_for_device(dev_path):
        """Pick the most specific wrapper class for the device at dev_path,
        based on its PCIe port type, vendor id, and header type."""
        pci_dev = PciDevice(dev_path)
        if pci_dev.has_exp():
            # Root port
            if pci_dev.pciflags["TYPE"] == 0x4:
                # 0x8086: Intel
                if pci_dev.vendor == 0x8086:
                    return IntelRootPort
                return PciBridge
            # Upstream port
            if pci_dev.pciflags["TYPE"] == 0x5:
                # 0x10b5: PLX
                if pci_dev.vendor == 0x10b5:
                    return PlxBridge
                return PciBridge
            # Downstream port
            if pci_dev.pciflags["TYPE"] == 0x6:
                if pci_dev.vendor == 0x10b5:
                    return PlxBridge
                return PciBridge
            # Endpoint
            if pci_dev.pciflags["TYPE"] == 0x0:
                # 0x10de: NVIDIA
                if pci_dev.vendor == 0x10de:
                    return Gpu
            # Header type 1 is a PCI-to-PCI bridge.
            if pci_dev.header_type == 0x1:
                return PciBridge
        else:
            # No PCIe capability: legacy PCI device.
            if pci_dev.vendor == 0x10de:
                return Gpu
        return PciDevice
    @staticmethod
    def init_dispatch(dev_path):
        """Instantiate the most specific wrapper class for dev_path
        (None when no class applies)."""
        cls = PciDevice.find_class_for_device(dev_path)
        if cls:
            return cls(dev_path)
        return None
    @staticmethod
    def find_or_init(dev_path):
        """Memoizing factory: one wrapper object per BDF, cached in the
        module-level DEVICES dict.  dev_path=None yields a shared dummy
        Device stored under the key -1."""
        if dev_path == None:
            if -1 not in DEVICES:
                DEVICES[-1] = Device()
            return DEVICES[-1]
        bdf = os.path.basename(dev_path)
        if bdf in DEVICES:
            return DEVICES[bdf]
        dev = PciDevice.init_dispatch(dev_path)
        DEVICES[bdf] = dev
        return dev
    def __init__(self, dev_path):
        """Open the device's config space and populate capability-backed
        register fields (skipped when config space reads as broken)."""
        self.parent = None
        self.children = []
        self.dev_path = dev_path
        self.bdf = os.path.basename(dev_path)
        if use_nvpex:
            self.nvpex = global_nvpex
            # Config space in nvpex is special bar 0xffffffff
            self.config = NvPexBar(self.nvpex, bar=0xffffffff, size=4096)
        else:
            self.config = self._open_config(dev_path)
        # Standard config-space header fields.
        self.vendor = self.config.read16(0)
        self.device = self.config.read16(2)
        self.header_type = self.config.read8(0xe)
        self.cfg_space_broken = False
        self._init_caps()
        self._init_bars()
        if not self.cfg_space_broken:
            self.command = DeviceField(PciCommand, self.config, PCI_COMMAND)
            if self.has_exp():
                # PCIe capability registers, addressed relative to the
                # capability's base offset.
                self.pciflags = DeviceField(PciExpFlags, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_FLAGS)
                self.devcap = DeviceField(PciDevCap, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_DEVCAP)
                self.devctl = DeviceField(PciDevCtl, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_DEVCTL)
                self.devctl2 = DeviceField(PciDevCtl2, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_DEVCTL2)
                self.link_cap = DeviceField(PciLinkCap, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_LNKCAP)
                self.link_ctl = DeviceField(PciLinkControl, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_LNKCTL)
                self.link_status = DeviceField(PciLinkStatus, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_LNKSTA)
                # Root port or downstream port
                if self.pciflags["TYPE"] == 0x4 or self.pciflags["TYPE"] == 0x6:
                    self.link_ctl_2 = DeviceField(PciLinkControl2, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_LNKCTL2)
                if self.pciflags["TYPE"] == 4:
                    self.rtctl = DeviceField(PciRootControl, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_RTCTL)
                if self.pciflags["SLOT"] == 1:
                    self.slot_ctl = DeviceField(PciSlotControl, self.config, self.caps[PCI_CAP_ID_EXP] + PCI_EXP_SLTCTL)
            if self.has_aer():
                self.uncorr_status = DeviceField(PciUncorrectableErrors, self.config, self.ext_caps[PCI_EXT_CAP_ID_ERR] + PCI_ERR_UNCOR_STATUS, name="UNCOR_STATUS")
                self.uncorr_mask = DeviceField(PciUncorrectableErrors, self.config, self.ext_caps[PCI_EXT_CAP_ID_ERR] + PCI_ERR_UNCOR_MASK, name="UNCOR_MASK")
                self.uncorr_sever = DeviceField(PciUncorrectableErrors, self.config, self.ext_caps[PCI_EXT_CAP_ID_ERR] + PCI_ERR_UNCOR_SEVER, name="UNCOR_SEVER")
            if self.has_pm():
                self.pmctrl = DeviceField(PciPmControl, self.config, self.caps[PCI_CAP_ID_PM] + PCI_PM_CTRL)
            if self.has_acs():
                self.acs_ctl = DeviceField(AcsCtl, self.config, self.ext_caps[PCI_EXT_CAP_ID_ACS] + PCI_EXT_ACS_CTL)
            if self.has_dpc():
                self.dpc_ctrl = DeviceField(DpcCtl, self.config, self.ext_caps[PCI_EXT_CAP_ID_DPC] + PCI_EXP_DPC_CTL)
                self.dpc_status = DeviceField(DpcStatus, self.config, self.ext_caps[PCI_EXT_CAP_ID_DPC] + PCI_EXP_DPC_STATUS)
        if is_sysfs_available:
            self.parent = PciDevice.find_or_init(sysfs_find_parent(dev_path))
        else:
            # Create a dummy device as the parent if sysfs is not available
            self.parent = Device()
def is_hidden(self):
return False
def has_aer(self):
return PCI_EXT_CAP_ID_ERR in self.ext_caps
def has_sriov(self):
return PCI_EXT_CAP_ID_SRIOV in self.ext_caps
def has_dpc(self):
return PCI_EXT_CAP_ID_DPC in self.ext_caps
def has_acs(self):
return PCI_EXT_CAP_ID_ACS in self.ext_caps
def has_exp(self):
return PCI_CAP_ID_EXP in self.caps
def has_pm(self):
return PCI_CAP_ID_PM in self.caps
    def reinit(self):
        # Re-run full initialization on the same device path to refresh all
        # cached state (config space, caps, BARs), e.g. after a reset.
        self.__init__(self.dev_path)
def _bar_num_to_sysfs_resource(self, barnum):
sysfs_num = barnum
# sysfs has gaps in case of 64-bit BARs
for b in range(barnum):
if self.bars[b][2]:
sysfs_num += 1
return sysfs_num
def _init_bars_sysfs(self):
self.bars = []
resources = open(os.path.join(self.dev_path, "resource")).readlines()
# Consider only first 6 resources
for bar_line in resources[:6]:
bar_line = bar_line.split(" ")
addr = int(bar_line[0], base=16)
end = int(bar_line[1], base=16)
flags = int(bar_line[2], base=16)
# Skip non-MMIO regions
if flags & 0x1 != 0:
continue
if addr != 0:
size = end - addr + 1
is_64bit = False
if (flags >> 1) & 0x3 == 0x2:
is_64bit = True
self.bars.append((addr, size, is_64bit))
def _init_bars(self):
if is_sysfs_available:
self._init_bars_sysfs()
else:
self._init_bars_config_space()
def _map_bar(self, bar_num, bar_size=None):
bar_addr = self.bars[bar_num][0]
if not bar_size:
bar_size = self.bars[bar_num][1]
if use_nvpex:
return NvPexBar(self.nvpex, bar=bar_num, size=bar_size)
else:
if mmio_access_type == "sysfs":
return FileMap(os.path.join(self.dev_path, f"resource{self._bar_num_to_sysfs_resource(bar_num)}"), 0, bar_size)
else:
return FileMap("/dev/mem", bar_addr, bar_size)
def _init_caps(self):
self.caps = {}
self.ext_caps = {}
cap_offset = self.config.read8(PCI_CAPABILITY_LIST)
data = 0
if cap_offset == 0xff:
self.cfg_space_broken = True
error("Broken device %s", self.dev_path)
return
while cap_offset != 0:
data = self.config.read32(cap_offset)
cap_id = data & CAP_ID_MASK
self.caps[cap_id] = cap_offset
cap_offset = (data >> 8) & 0xff
self._init_ext_caps()
def _init_ext_caps(self):
if self.config.size <= PCI_CFG_SPACE_SIZE:
return
offset = PCI_CFG_SPACE_SIZE
header = self.config.read32(PCI_CFG_SPACE_SIZE)
while offset != 0:
cap = header & 0xffff
self.ext_caps[cap] = offset
offset = (header >> 20) & 0xffc
header = self.config.read32(offset)
def __str__(self):
return "PCI %s %s:%s" % (self.bdf, hex(self.vendor), hex(self.device))
def __hash__(self):
return hash((self.bdf, self.vendor, self.device))
def set_command_memory(self, enable):
self.command["MEMORY"] = 1 if enable else 0
def sanity_check_cfg_space(self):
# Use an offset unlikely to be intercepted in case of virtualization
vendor = self.config.read16(0xf0)
return vendor != 0xffff
def sysfs_reset(self):
reset_path = os.path.join(self.dev_path, "reset")
if not os.path.exists(reset_path):
error("%s reset not present: '%s'", self, reset_path)
with open(reset_path, "w") as rf:
rf.write("1")
def reset_with_os(self):
if is_linux:
return self.sysfs_reset()
def is_flr_supported(self):
if not self.has_exp():
return False
return self.devcap["FLR"] == 1
# Offset of the Bridge Control register in a type-1 (bridge) config header.
PCI_BRIDGE_CONTROL = 0x3e
class PciBridgeControl(Bitfield):
    """Bridge Control register of a type-1 config header (1 byte wide)."""
    size = 1
    fields = {
        # Enable parity detection on secondary interface
        "PARITY": 0x01,
        # The same for SERR forwarding
        "SERR": 0x02,
        # Enable ISA mode
        "ISA": 0x04,
        # Forward VGA addresses
        "VGA": 0x08,
        # Report master aborts
        "MASTER_ABORT": 0x20,
        # Secondary bus reset (SBR)
        "BUS_RESET": 0x40,
        # Fast Back2Back enabled on secondary interface
        "FAST_BACK": 0x80,
    }
    def __str__(self):
        return f"{{ Bridge control {self.values()} raw {self.raw:#x} }}"
class PciBridge(PciDevice):
    """A PCI-to-PCI bridge.

    Adds the Bridge Control register and links itself into its parent's
    children list.
    """
    def __init__(self, dev_path):
        super().__init__(dev_path)
        self.bridge_ctl = DeviceField(PciBridgeControl, self.config, PCI_BRIDGE_CONTROL)
        if self.parent:
            self.parent.children.append(self)
class BrokenGpu(PciDevice):
    """A GPU whose config space or BAR setup failed sanity checks."""
    def __init__(self, dev_path):
        super().__init__(dev_path)
        self.name = "BrokenGpu"
        self.cfg_space_working = False
        self.bars_configured = False
        self.cfg_space_working = self.sanity_check_cfg_space()
        error("Config space working %s", str(self.cfg_space_working))
        if self.cfg_space_working:
            self.bars_configured = self.sanity_check_cfg_space_bars()
        if self.parent:
            self.parent.children.append(self)
    def is_gpu(self):
        """Still identifies as a GPU."""
        return True
    def is_broken_gpu(self):
        """Marks this device as broken."""
        return True
    def reset_with_sbr(self):
        """Reset via the parent bridge's Secondary Bus Reset, then recheck
        config space."""
        assert self.parent.is_bridge()
        self.parent.toggle_sbr()
        return self.sanity_check_cfg_space()
    def is_driver_loaded(self):
        """No driver interaction is possible on a broken GPU."""
        return False
    def __str__(self):
        return "GPU %s [broken, cfg space working %d bars configured %d]" % (self.bdf, self.cfg_space_working, self.bars_configured)
class NvidiaDevice(PciDevice):
    """Base class for NVIDIA PCIe devices (GPUs, NvSwitches).

    Records BAR0 and parent linkage; subclasses create the actual BAR
    mappings and call common_init() once their device properties are known.
    """
    def __init__(self, dev_path):
        super(NvidiaDevice, self).__init__(dev_path)
        # Physical address of BAR0; the mapping itself is made by subclasses.
        self.bar0_addr = self.bars[0][0]
        # Created lazily by _init_fsp_rpc()
        self.fsp_rpc = None
        # Cached module name; filled on first module_name access (subclass).
        self._mod_name = None
        if self.parent:
            self.parent.children.append(self)
    def common_init(self):
        # Pick up optional nvlink properties from the per-device props dict.
        self.nvlink = None
        if "nvlink" in self.props:
            self.nvlink = self.props["nvlink"]
    @property
    def is_nvlink_supported(self):
        return self.nvlink is not None
    def is_gpu(self):
        return False
    def is_broken_gpu(self):
        return False
    def is_unknown(self):
        return True
    def reset_with_sbr(self):
        # Subclasses that support SBR must override this.
        assert False
    def write(self, reg, data):
        # 32-bit BAR0 register write.
        self.bar0.write32(reg, data)
    def write_verbose(self, reg, data):
        # Write with before/after readback logged, for debugging HW state.
        old = self.read(reg)
        self.bar0.write32(reg, data)
        new = self.read(reg)
        debug("%s writing %s = %s (old %s diff %s) new %s", self, hex(reg), hex(data), hex(old), hex(data ^ old), hex(new))
    def sanity_check(self):
        # Device is healthy if config space responds and BAR0 reads don't
        # return all-ones.
        if not self.sanity_check_cfg_space():
            debug("%s sanity check of config space failed", self)
            return False
        boot = self.read(NV_PMC_BOOT_0)
        if boot == 0xffffffff:
            debug("%s sanity check of mmio failed", self)
            return False
        return True
    def reset_pre(self, reset_with_flr=None):
        # Arm scratch registers before a reset so reset_post() can tell which
        # kind of reset (FLR vs SBR) actually took effect.
        if reset_with_flr == None:
            reset_with_flr = self.is_flr_supported()
        debug("%s reset_pre FLR supported %s, FLR being used %s", self, self.is_flr_supported(), reset_with_flr)
        # FLR clears only the FLR scratch; SBR clears both.
        self.expected_sbr_only_scratch = (1 if reset_with_flr else 0)
        flr_scratch = self.flr_resettable_scratch()
        sbr_scratch = self.sbr_resettable_scratch()
        self.write_verbose(flr_scratch, 0x1)
        self.write_verbose(sbr_scratch, 0x1)
        if self.read(sbr_scratch) == 0:
            # Scratch not writable (e.g. locked down); don't expect it later.
            debug(f"{self} SBR scratch writes not sticking")
            self.expected_sbr_only_scratch = 0
    def reset_post(self):
        # Log the post-reset scratch state armed by reset_pre().
        flr_scratch = self.flr_resettable_scratch()
        sbr_scratch = self.sbr_resettable_scratch()
        debug(f"{self} reset_post flr-scratch after 0x{self.read_bad_ok(flr_scratch):x}, sbr-only scratch 0x{self.read_bad_ok(sbr_scratch):x}, flr cap {self.is_flr_supported()}")
    def sysfs_reset(self):
        # Wrap the generic sysfs reset with scratch-register bookkeeping.
        self.reset_pre()
        super(NvidiaDevice, self).sysfs_reset()
        self.reset_post()
    def _init_fsp_rpc(self):
        # Lazily create the FSP RPC channel (channel 2).
        if self.fsp_rpc != None:
            return
        # Wait for boot to be done such that FSP is available
        self.wait_for_boot()
        self.init_npus()
        self.fsp_rpc = FspRpc(self.fsp, channel_num=2)
    def poll_register(self, name, offset, value, timeout, sleep_interval=0.01, mask=0xffffffff, debug_print=False):
        """Poll a register until (reg & mask) == value or `timeout` seconds pass.

        Raises GpuError on timeout. Expected values with a 0xbadf upper half
        are read with read_bad_ok() so the bad-read check doesn't trip.
        """
        timestamp = perf_counter()
        while True:
            loop_stamp = perf_counter()
            try:
                if value >> 16 == 0xbadf:
                    reg = self.read_bad_ok(offset)
                else:
                    reg = self.read(offset)
            except:
                error("Failed to read npu register %s (%s)", name, hex(offset))
                raise
            if reg & mask == value:
                if debug_print:
                    debug("Register %s (%s) = %s after %f secs", name, hex(offset), hex(value), perf_counter() - timestamp)
                return
            if loop_stamp - timestamp > timeout:
                raise GpuError("Timed out polling register %s (%s), value %s is not the expected %s. Timeout %f secs" % (name, hex(offset), hex(reg), hex(value), timeout))
            if sleep_interval > 0.0:
                time.sleep(sleep_interval)
class GpuMemPort(object):
    """One IMEM/DMEM access port of a npu.

    The port is driven through a control register (offset + auto-increment
    configuration) and a data register; reads/writes stream 32-bit words
    through the data register.
    """
    def __init__(self, name, mem_control_reg, max_size, npu):
        self.name = name
        self.control_reg = mem_control_reg
        # Data register sits at a fixed delta from the control register.
        self.data_reg = self.control_reg + NV_PPWR_NPU_IMEMD(0) - NV_PPWR_NPU_IMEMC(0)
        self.offset = 0
        self.max_size = max_size
        self.auto_inc_read = False
        self.auto_inc_write = False
        self.secure_imem = False
        self.npu = npu
        # Lazily flush the cached configuration to HW on first configure().
        self.need_to_write_config_to_hw = True
    def __str__(self):
        return "%s offset %d (0x%x) incr %d incw %d max size %d (0x%x) control reg 0x%x = 0x%x" % (self.name,
                self.offset, self.offset, self.auto_inc_read, self.auto_inc_write,
                self.max_size, self.max_size,
                self.control_reg, self.npu.gpu.read(self.control_reg))
    def configure(self, offset, inc_read=True, inc_write=True, secure_imem=False):
        # Only touch the HW control register when the requested configuration
        # differs from the cached one (or it was never written).
        need_to_write = self.need_to_write_config_to_hw
        if offset != self.offset:
            self.offset = offset
            need_to_write = True
        if self.auto_inc_read != inc_read:
            self.auto_inc_read = inc_read
            need_to_write = True
        if self.auto_inc_write != inc_write:
            self.auto_inc_write = inc_write
            need_to_write = True
        if self.secure_imem != secure_imem:
            self.secure_imem = secure_imem
            need_to_write = True
        if not need_to_write:
            return
        memc_value = offset
        if inc_read:
            memc_value |= NV_PPWR_NPU_IMEMC_AINCR_TRUE
        if inc_write:
            memc_value |= NV_PPWR_NPU_IMEMC_AINCW_TRUE
        if secure_imem:
            memc_value |= NV_PPWR_NPU_IMEMC_SECURE_ENABLED
        self.npu.gpu.write(self.control_reg, memc_value)
        self.need_to_write_config_to_hw = False
    def handle_offset_wraparound(self):
        # HW wraps the offset back to 0 at max_size; mirror that in our cache.
        if self.offset == self.max_size:
            self.configure(0, self.auto_inc_read, self.auto_inc_write, self.secure_imem)
    def read(self, size):
        """Read `size` bytes (as a list of 32-bit words) through the port."""
        data = []
        for offset in range(0, size, 4):
            # MEM could match 0xbadf... so use read_bad_ok()
            data.append(self.npu.gpu.read_bad_ok(self.data_reg))
        if self.auto_inc_read:
            self.offset += size
        self.handle_offset_wraparound()
        return data
    def write(self, data, debug_write=False):
        """Write a list of 32-bit words through the port."""
        for d in data:
            if debug_write:
                control = self.npu.gpu.read(self.control_reg)
                debug("Writing data %s = %s offset %s, control %s", hex(self.data_reg), hex(d), hex(self.offset), hex(control))
            self.npu.gpu.write(self.data_reg, d)
            if self.auto_inc_write:
                self.offset += 4
        self.handle_offset_wraparound()
class GpuImemPort(GpuMemPort):
    """A GpuMemPort for IMEM, which additionally has a tag (IMEMT) register."""
    def __init__(self, name, mem_control_reg, max_size, npu):
        super().__init__(name, mem_control_reg, max_size, npu)
        # IMEMT sits at the same fixed delta from this port's control
        # register as in the PMU npu register layout.
        self.imemt_reg = self.control_reg + NV_PPWR_NPU_IMEMT(0) - NV_PPWR_NPU_IMEMC(0)
class GpuFalcon(object):
    """A npu microcontroller on the GPU/NvSwitch.

    Holds the IMEM/DMEM/EMEM access ports and the per-npu memory sizing,
    cross-checked between the GPU config tables (npus_cfg) and the HW
    HWCFG registers. Subclasses may preset attributes such as
    no_outside_reset / has_emem before calling this __init__ (read back via
    getattr below). NOTE(review): several members referenced here
    (engine_reset, dmactl, hwcfg, select_core_npu, default_core_npu)
    are expected to be provided by subclasses — not visible in this chunk.
    """
    def __init__(self, name, cpuctl, device, pmc_enable_mask=None, pmc_device_enable_mask=None):
        self.name = name
        self.device = device
        self.gpu = device
        # Register page containing CPUCTL; other registers are page-relative.
        self.base_page = cpuctl & ~0xfff
        self.base_page_emem = getattr(self, 'base_page_emem', self.base_page)
        self.cpuctl = cpuctl
        self.pmc_enable_mask = pmc_enable_mask
        self.pmc_device_enable_mask = pmc_device_enable_mask
        self.no_outside_reset = getattr(self, 'no_outside_reset', False)
        self.has_emem = getattr(self, 'has_emem', False)
        self.num_emem_ports = getattr(self, 'num_emem_ports', 1)
        # Sizing/port-count caches; resolved lazily from config or HWCFG.
        self._max_imem_size = None
        self._max_dmem_size = None
        self._max_emem_size = None
        self._imem_port_count = None
        self._dmem_port_count = None
        self._default_core_npu = None
        self._can_run_ns = None
        self.csb_offset_mailbox0 = getattr(self, 'csb_offset_mailbox0', 0x40)
        self.mem_ports = []
        self.enable()
        self.mem_spaces = ["imem", "dmem"]
        self.imem_ports = []
        for p in range(0, self.imem_port_count):
            name = self.name + "_imem_%d" % p
            # IMEM control registers are spaced 16 bytes apart per port.
            mem_control_reg = self.imemc + p * 16
            max_size = self.max_imem_size
            self.imem_ports.append(GpuImemPort(name, mem_control_reg, max_size, self))
        self.dmem_ports = []
        for p in range(0, self.dmem_port_count):
            name = self.name + "_dmem_%d" % p
            # DMEM control registers are spaced 8 bytes apart per port.
            mem_control_reg = self.dmemc + p * 8
            max_size = self.max_dmem_size
            self.dmem_ports.append(GpuMemPort(name, mem_control_reg, max_size, self))
        self.emem_ports = []
        if self.has_emem:
            self.mem_spaces.append("emem")
            self._init_emem_ports()
        self.mem_ports = self.imem_ports + self.dmem_ports + self.emem_ports
    def _init_emem_ports(self):
        # EMEM control registers live at a fixed offset in the EMEM page,
        # spaced 8 bytes apart per port.
        assert self.has_emem
        for p in range(self.num_emem_ports):
            name = self.name + f"_emem_{p}"
            self.emem_ports.append(GpuMemPort(name, self.base_page_emem + 0xac0 + p * 8, self.max_emem_size, self))
    @property
    def imemc(self):
        # IMEM control register, derived from CPUCTL by fixed delta.
        return self.cpuctl + NV_PPWR_NPU_IMEMC(0) - NV_PPWR_NPU_CPUCTL
    @property
    def dmemc(self):
        # DMEM control register, derived from CPUCTL by fixed delta.
        return self.cpuctl + NV_PPWR_NPU_DMEMC(0) - NV_PPWR_NPU_CPUCTL
    @property
    def hwcfg1(self):
        # HWCFG1 register (port counts), derived from CPUCTL by fixed delta.
        return self.cpuctl + NV_PPWR_NPU_HWCFG1 - NV_PPWR_NPU_CPUCTL
    @property
    def hwcfg_emem(self):
        # EMEM sizing register.
        return self.cpuctl + 0x9bc
    @property
    def max_imem_size(self):
        # Prefer the GPU config table; fall back to (and cross-check against)
        # the HW-reported size.
        if self._max_imem_size:
            return self._max_imem_size
        if self.name not in self.gpu.npus_cfg:
            if self.gpu.needs_npus_cfg:
                error("Missing imem/dmem config for npu %s, falling back to hwcfg", self.name)
            self._max_imem_size = self.max_imem_size_from_hwcfg()
        else:
            # Use the imem size provided in the GPU config
            self._max_imem_size = self.gpu.npus_cfg[self.name]["imem_size"]
        # And make sure it matches HW
        if self._max_imem_size != self.max_imem_size_from_hwcfg():
            raise GpuError("HWCFG imem doesn't match %d != %d" % (self._max_imem_size, self.max_imem_size_from_hwcfg()))
        return self._max_imem_size
    @property
    def max_dmem_size(self):
        # Same config-vs-HWCFG resolution as max_imem_size, for DMEM.
        if self._max_dmem_size:
            return self._max_dmem_size
        if self.name not in self.gpu.npus_cfg:
            if self.gpu.needs_npus_cfg:
                error("Missing imem/dmem config for npu %s, falling back to hwcfg", self.name)
            self._max_dmem_size = self.max_dmem_size_from_hwcfg()
        else:
            # Use the dmem size provided in the GPU config
            self._max_dmem_size = self.gpu.npus_cfg[self.name]["dmem_size"]
        # And make sure it matches HW
        if self._max_dmem_size != self.max_dmem_size_from_hwcfg():
            raise GpuError("HWCFG dmem doesn't match %d != %d" % (self._max_dmem_size, self.max_dmem_size_from_hwcfg()))
        return self._max_dmem_size
    @property
    def max_emem_size(self):
        # Same config-vs-HWCFG resolution, for EMEM.
        if self._max_emem_size:
            return self._max_emem_size
        if self.name not in self.gpu.npus_cfg or "emem_size" not in self.gpu.npus_cfg[self.name]:
            if self.gpu.needs_npus_cfg:
                error("Missing emem config for npu %s, falling back to hwcfg", self.name)
            self._max_emem_size = self.max_emem_size_from_hwcfg()
        else:
            # Use the emem size provided in the GPU config
            self._max_emem_size = self.gpu.npus_cfg[self.name]["emem_size"]
        # And make sure it matches HW
        if self._max_emem_size != self.max_emem_size_from_hwcfg():
            raise GpuError("HWCFG emem doesn't match %d != %d" % (self._max_emem_size, self.max_emem_size_from_hwcfg()))
        return self._max_emem_size
    @property
    def dmem_port_count(self):
        # Same config-vs-HWCFG resolution, for the DMEM port count.
        if self._dmem_port_count:
            return self._dmem_port_count
        if self.name not in self.gpu.npus_cfg or "dmem_port_count" not in self.gpu.npus_cfg[self.name]:
            if self.gpu.needs_npus_cfg:
                error("%s missing dmem port count for npu %s, falling back to hwcfg", self.gpu, self.name)
            self._dmem_port_count = self.dmem_port_count_from_hwcfg()
        else:
            # Use the dmem port count provided in the GPU config
            self._dmem_port_count = self.gpu.npus_cfg[self.name]["dmem_port_count"]
        # And make sure it matches HW
        if self._dmem_port_count != self.dmem_port_count_from_hwcfg():
            raise GpuError("HWCFG dmem port count doesn't match %d != %d" % (self._dmem_port_count, self.dmem_port_count_from_hwcfg()))
        return self._dmem_port_count
    @property
    def imem_port_count(self):
        # Same config-vs-HWCFG resolution, for the IMEM port count.
        if self._imem_port_count:
            return self._imem_port_count
        if self.name not in self.gpu.npus_cfg or "imem_port_count" not in self.gpu.npus_cfg[self.name]:
            if self.gpu.needs_npus_cfg:
                error("%s missing imem port count for npu %s, falling back to hwcfg", self.gpu, self.name)
            self._imem_port_count = self.imem_port_count_from_hwcfg()
        else:
            # Use the imem port count provided in the GPU config
            self._imem_port_count = self.gpu.npus_cfg[self.name]["imem_port_count"]
        # And make sure it matches HW
        if self._imem_port_count != self.imem_port_count_from_hwcfg():
            raise GpuError("HWCFG imem port count doesn't match %d != %d" % (self._imem_port_count, self.imem_port_count_from_hwcfg()))
        return self._imem_port_count
    def max_imem_size_from_hwcfg(self):
        # Sizes are reported in 256-byte units; register layout differs on
        # nvswitch/Ampere+.
        if self.device.is_nvswitch() or self.gpu.is_ampere_plus:
            hwcfg = self.gpu.read(self.base_page + 0x278)
            return (hwcfg & 0xfff) * 256
        else:
            hwcfg = self.gpu.read(self.hwcfg)
            return (hwcfg & 0x1ff) * 256
    def max_dmem_size_from_hwcfg(self):
        if self.device.is_nvswitch() or self.gpu.is_ampere_plus:
            hwcfg = self.gpu.read(self.base_page + 0x278)
            return ((hwcfg >> 16) & 0xfff) * 256
        else:
            hwcfg = self.gpu.read(self.hwcfg)
            return ((hwcfg >> 9) & 0x1ff) * 256
    def max_emem_size_from_hwcfg(self):
        assert self.has_emem
        hwcfg = self.gpu.read(self.hwcfg_emem)
        return (hwcfg & 0x1ff) * 256
    def imem_port_count_from_hwcfg(self):
        hwcfg = self.gpu.read(self.hwcfg1)
        return ((hwcfg >> 8) & 0xf)
    def dmem_port_count_from_hwcfg(self):
        hwcfg = self.gpu.read(self.hwcfg1)
        return ((hwcfg >> 12) & 0xf)
    def get_mem_ports(self, mem):
        """Return the list of ports for mem space "imem"/"dmem"/"emem"."""
        if mem == "imem":
            return self.imem_ports
        elif mem == "dmem":
            return self.dmem_ports
        elif mem == "emem":
            assert self.has_emem
            return self.emem_ports
        else:
            assert 0, "Unknown mem %s" % mem
    def get_mem_port(self, mem, port=0):
        return self.get_mem_ports(mem)[port]
    def load_imem(self, data, phys_base, virt_base, secure=False, virtual_tag=True, debug_load=False):
        # Load code into IMEM through port 0, optionally writing virtual tags.
        self.imem_ports[0].configure(offset=phys_base, secure_imem=secure)
        if virtual_tag:
            self.imem_ports[0].write_with_tags(data, virt_base=virt_base, debug_write=debug_load)
        else:
            self.imem_ports[0].write(data, debug_write=debug_load)
    def read_port(self, port, phys_base, size):
        port.configure(offset=phys_base)
        return port.read(size)
    def write_port(self, port, data, phys_base, debug_write=False):
        port.configure(offset=phys_base)
        port.write(data, debug_write)
    def read_imem(self, phys_base, size):
        return self.read_port(self.imem_ports[0], phys_base, size)
    def load_dmem(self, data, phys_base, debug_load=False):
        self.write_port(self.dmem_ports[0], data, phys_base, debug_write=debug_load)
    def read_dmem(self, phys_base, size):
        return self.read_port(self.dmem_ports[0], phys_base, size)
    def write_emem(self, data, phys_base, port=0, debug_write=False):
        self.write_port(self.emem_ports[port], data, phys_base, debug_write=debug_write)
    def read_emem(self, phys_base, size, port=0):
        return self.read_port(self.emem_ports[port], phys_base, size)
    def enable(self):
        """Bring the npu out of reset and wait for its DMA controller."""
        if self.no_outside_reset:
            # The npu (e.g. FSP) cannot be reset from outside; leave as-is.
            pass
        elif self.pmc_enable_mask:
            pmc_enable = self.gpu.read(NV_PMC_ENABLE)
            self.gpu.write(NV_PMC_ENABLE, pmc_enable | self.pmc_enable_mask)
        elif self.pmc_device_enable_mask:
            enable = self.gpu.read(NV_PMC_DEVICE_ENABLE)
            self.gpu.write(NV_PMC_DEVICE_ENABLE, enable | self.pmc_device_enable_mask)
        else:
            self.gpu.write(self.engine_reset, 0)
        if not self.device.has_fsp:
            if not self.default_core_npu:
                self.select_core_npu()
            self.gpu.poll_register(self.name + " dmactl", self.dmactl, value=0, timeout=1, mask=0x6)
        self.reset_mem_ports()
    def reset_mem_ports(self):
        # Force all ports to rewrite their configuration on next use.
        for m in self.mem_ports:
            m.need_to_write_config_to_hw = True
class FspFalcon(GpuFalcon):
    """The FSP npu: not resettable from outside, talks through
    EMEM-backed message queues on 8 ports."""
    def __init__(self, device):
        self.no_outside_reset = True
        self.has_emem = True
        self.base_page_emem = 0x8f2000
        self.num_emem_ports = 8
        super().__init__("fsp", 0x8f0100, device, pmc_enable_mask=None)
    def queue_head_off(self, i):
        """Command queue head register for channel i."""
        return self.base_page + 0x2c00 + i * 8
    def queue_tail_off(self, i):
        """Command queue tail register for channel i."""
        return self.base_page + 0x2c04 + i * 8
    def msg_queue_head_off(self, i):
        """Message queue head register for channel i."""
        return self.base_page + 0x2c80 + i * 8
    def msg_queue_tail_off(self, i):
        """Message queue tail register for channel i."""
        return self.base_page + 0x2c84 + i * 8
class FspRpc(object):
    """RPC channel to the FSP over its EMEM-backed MCTP/NVDM queues.

    Each channel owns a 1 KiB slice of EMEM; commands are written to the cmd
    queue and responses polled from the msg queue.
    """
    def __init__(self, fsp_npu, channel_num):
        self.npu = fsp_npu
        self.device = self.npu.device
        self.channel_num = channel_num
        # Base of this channel's EMEM slice.
        self.nvdm_emem_base = self.channel_num * 1024
        self.reset_rpc_state()
    def __str__(self):
        return f"{self.device} FSP-RPC"
    def reset_rpc_state(self):
        """Drain/reset both queues so a fresh command exchange can start."""
        if self.is_queue_empty() and self.is_msg_queue_empty():
            debug(f"{self} both queues empty; queue {self.read_queue_state()} msg queue {self.read_msg_queue_state()}")
            return
        debug(f"{self} one of the queues not empty, waiting for things to settle; queue {self.read_queue_state()} msg queue {self.read_msg_queue_state()}")
        self.poll_for_msg_queue(timeout_fatal=False)
        debug(f"{self} after wait; queue {self.read_queue_state()} msg queue {self.read_msg_queue_state()}")
        # Reset both queues
        self.write_queue_head_tail(self.nvdm_emem_base, self.nvdm_emem_base)
        self.device.write_verbose(self.npu.msg_queue_tail_off(self.channel_num), self.nvdm_emem_base)
        self.device.write_verbose(self.npu.msg_queue_head_off(self.channel_num), self.nvdm_emem_base)
    def read_queue_state(self):
        """Return (head, tail) of the command queue."""
        return (self.device.read(self.npu.queue_head_off(self.channel_num)),
                self.device.read(self.npu.queue_tail_off(self.channel_num)))
    def is_queue_empty(self):
        mhead, mtail = self.read_queue_state()
        return mhead == mtail
    def write_queue_head_tail(self, head, tail):
        # Tail first; writing head is what kicks the FSP.
        self.device.write_verbose(self.npu.queue_tail_off(self.channel_num), tail)
        self.device.write_verbose(self.npu.queue_head_off(self.channel_num), head)
    def read_msg_queue_state(self):
        """Return (head, tail) of the message (response) queue."""
        return (self.device.read(self.npu.msg_queue_head_off(self.channel_num)),
                self.device.read(self.npu.msg_queue_tail_off(self.channel_num)))
    def is_msg_queue_empty(self):
        mhead, mtail = self.read_msg_queue_state()
        return mhead == mtail
    def write_msg_queue_tail(self, tail):
        self.device.write_verbose(self.npu.msg_queue_tail_off(self.channel_num), tail)
    def poll_for_msg_queue(self, timeout=5, sleep_interval=0.01, timeout_fatal=True):
        """Wait for a response to appear in the message queue."""
        timestamp = perf_counter()
        while True:
            loop_stamp = perf_counter()
            mhead, mtail = self.read_msg_queue_state()
            if mhead != mtail:
                return
            if loop_stamp - timestamp > timeout:
                if timeout_fatal:
                    raise GpuError(f"Timed out polling for {self.npu.name} message queue on channel {self.channel_num}. head {mhead} == tail {mtail}")
                else:
                    return
            if sleep_interval > 0.0:
                time.sleep(sleep_interval)
    def poll_for_queue_empty(self, timeout=1, sleep_interval=0.01):
        """Wait until the command queue drains; raise GpuError on timeout.

        Fix: the timeout message used head/tail variables (mhead/mtail) that
        were never assigned in this method, so hitting the timeout raised a
        NameError instead of the intended GpuError.
        """
        timestamp = perf_counter()
        while True:
            loop_stamp = perf_counter()
            head, tail = self.read_queue_state()
            if head == tail:
                return
            if loop_stamp - timestamp > timeout:
                raise GpuError(f"Timed out polling for {self.npu.name} cmd queue to be empty on channel {self.channel_num}. head {head} != tail {tail}")
            if sleep_interval > 0.0:
                time.sleep(sleep_interval)
    def prc_cmd(self, data):
        """Send a PRC command (list of 32-bit words) and return the response payload.

        Raises GpuError on malformed responses or non-zero PRC status.
        """
        mctp_header = MctpHeader()
        mctp_msg_header = MctpMessageHeader()
        # NVDM type 0x13 is the PRC request; responses come back as 0x15.
        mctp_msg_header.fields.nvdm_type = 0x13
        self.device.wait_for_boot()
        self.poll_for_queue_empty()
        head, tail = self.read_queue_state()
        if head != tail:
            raise GpuError(f"RPC cmd queue not empty head {head} tail {tail}")
        mhead, mtail = self.read_msg_queue_state()
        if mhead != mtail:
            raise GpuError(f"RPC msg queue not empty head {mhead} tail {mtail}")
        cdata = [mctp_header.raw, mctp_msg_header.raw] + data
        debug(f"{self} command {[hex(d) for d in cdata]}")
        self.npu.write_emem(cdata, phys_base=self.nvdm_emem_base, port=self.channel_num)
        # Writing the head kicks the FSP; tail points at the last word.
        self.write_queue_head_tail(self.nvdm_emem_base, self.nvdm_emem_base + (len(cdata) - 1) * 4)
        rpc_time = perf_counter()
        self.poll_for_msg_queue()
        rpc_time = perf_counter() - rpc_time
        debug(f"{self} response took {rpc_time*1000:.1f} ms")
        mhead, mtail = self.read_msg_queue_state()
        debug(f"{self} msg queue after poll {mhead} {mtail}")
        msize = mtail - mhead + 4
        mdata = self.npu.read_emem(self.nvdm_emem_base, msize, port=self.channel_num)
        debug(f"{self} response {[hex(d) for d in mdata]}")
        # Reset the tail before checking for errors
        self.write_msg_queue_tail(mhead)
        # Minimum response: MCTP hdr, msg hdr, seq, request type, status.
        if msize < 5 * 4:
            raise GpuError(f"{self} response size {msize} is smaller than expected. Data {[hex(d) for d in mdata]}")
        mctp_msg_header.raw = mdata[1]
        if mctp_msg_header.fields.nvdm_type != 0x15:
            raise GpuError(f"{self} message wrong nvdm_type. Data {[hex(d) for d in mdata]}")
        if mdata[3] != 0x13:
            raise GpuError(f"{self} message request type 0x{mdata[3]:x} not matching the command. Data {[hex(d) for d in mdata]}")
        if mdata[4] != 0x0:
            raise GpuError(f"{self} failed with error 0x{mdata[4]:x}. Data {[hex(d) for d in mdata]}")
        return mdata[5:]
    def prc_knob_read(self, knob_id):
        """Read one PRC knob; returns its 32-bit value."""
        # Knob read is sub msg 0xc
        prc = 0xc
        prc |= 0x2 << 8
        prc |= knob_id << 16
        debug(f"{self} reading knob 0x{knob_id:x}")
        data = self.prc_cmd([prc])
        if len(data) != 1:
            raise GpuError(f"RPC wrong response size {len(data)}. Data {[hex(d) for d in data]}")
        debug(f"{self} read knob 0x{knob_id:x} = 0x{data[0]:x}")
        return data[0]
    def prc_knob_write(self, knob_id, value):
        """Write one PRC knob."""
        # Knob write is sub msg 0xd
        prc = 0xd
        prc |= 0x2 << 8
        prc |= knob_id << 16
        prc_1 = value
        debug(f"{self} writing knob 0x{knob_id:x} = 0x{value:x}")
        data = self.prc_cmd([prc, prc_1])
        if len(data) != 0:
            raise GpuError(f"RPC wrong response size {len(data)}. Data {[hex(d) for d in data]}")
        debug(f"{self} wrote knob 0x{knob_id:x} = 0x{value:x}")
    def prc_knob_check_and_write(self, knob_id, value):
        # Avoid redundant writes; knob writes can be expensive/persisted.
        old_value = self.prc_knob_read(knob_id)
        if old_value != value:
            self.prc_knob_write(knob_id, value)
class UnknownDevice(Exception):
    """Raised when a PCI device cannot be identified."""
class UnknownGpuError(Exception):
    """Raised when a device looks like a GPU but is not in the GPU map."""
class BrokenGpuError(Exception):
    """Raised when a GPU fails config space / MMIO sanity checks."""
class GpuError(Exception):
    """Generic GPU interaction error (bad reads, timeouts, RPC failures)."""
class GpuUcode(object):
    """A npu microcode image.

    NOTE(review): the imem_ns/imem_sec/dmem word lists and the *_virt_base /
    *_phys_base attributes used below are presumably filled in by subclasses
    that parse a concrete ucode format — not visible in this chunk; confirm.
    """
    def __init__(self, name, binary):
        self.name = name
        self.binary = binary
        # PKC signature data, if any; set by subclasses.
        self.pkc = None
    @property
    def imem_ns_size(self):
        # Sizes are in bytes; the backing lists hold 32-bit words.
        return len(self.imem_ns) * 4
    @property
    def imem_sec_size(self):
        return len(self.imem_sec) * 4
    @property
    def dmem_size(self):
        return len(self.dmem) * 4
    def __str__(self):
        return "Ucode %s (imem_ns size %d virt 0x%x phys 0x%x, imem_sec size %d virt 0x%x phys 0x%x, dmem size %d base 0x%x)" % (self.name,
                self.imem_ns_size, self.imem_ns_virt_base, self.imem_ns_phys_base,
                self.imem_sec_size, self.imem_sec_virt_base, self.imem_sec_phys_base,
                self.dmem_size, self.dmem_phys_base)
class NiceStruct(ctypes.LittleEndianStructure):
    """LittleEndianStructure whose str() renders a {field: value} dict."""
    def __str__(self) -> str:
        return str({name: getattr(self, name) for name, *_ in self._fields_})
class MctpHeader_bits(NiceStruct):
    # MCTP transport header bit layout (low bit first within the 32-bit word).
    _fields_ = [
        ("version", c_uint32, 4),
        ("rsvd0", c_uint32, 4),
        # Destination endpoint id
        ("deid", c_uint32, 8),
        # Source endpoint id
        ("seid", c_uint32, 8),
        ("tag", c_uint32, 3),
        # Tag owner
        ("to", c_uint32, 1),
        ("seq", c_uint32, 2),
        # End / start of message markers
        ("eom", c_uint32, 1),
        ("som", c_uint32, 1),
    ]
class MctpHeader(ctypes.Union):
    # Union of the MCTP header bitfields with the raw 32-bit word.
    _fields_ = [("fields", MctpHeader_bits),
                ("raw", c_uint32)]
    def __init__(self):
        # Single-packet message by default: start and end of message set.
        self.fields.som = 1
        self.fields.eom = 1
class MctpMessageHeader_bits(NiceStruct):
    # MCTP message header bit layout.
    _fields_ = [
        ("type", c_uint32, 7),
        # Integrity check flag
        ("ic", c_uint32, 1),
        ("vendor_id", c_uint32, 16),
        # NVIDIA datagram message type
        ("nvdm_type", c_uint32, 8),
    ]
class MctpMessageHeader(ctypes.Union):
    # Union of the MCTP message header bitfields with the raw 32-bit word.
    _fields_ = [("fields", MctpMessageHeader_bits),
                ("raw", c_uint32)]
    def __init__(self):
        # 0x7e is the vendor-defined (PCI) MCTP message type; vendor is NVIDIA.
        self.fields.type = 0x7e
        self.fields.vendor_id = 0x10de
class PrcKnob(Enum):
    # PRC knob ids understood by the FSP; the *_ALLOW_INB knobs control
    # whether the corresponding feature may be changed in-band.
    PRC_KNOB_ID_01 = 0x01
    PRC_KNOB_ID_02 = 0x02
    PRC_KNOB_ID_03 = 0x03
    PRC_KNOB_ID_04 = 0x04
    PRC_KNOB_ID_CCD_ALLOW_INB = 0x05
    # Confidential Computing devtools mode
    PRC_KNOB_ID_CCD = 0x06
    PRC_KNOB_ID_CCM_ALLOW_INB = 0x07
    # Confidential Computing mode
    PRC_KNOB_ID_CCM = 0x08
    PRC_KNOB_ID_BAR0_DECOUPLER_ALLOW_INB = 0x09
    PRC_KNOB_ID_BAR0_DECOUPLER = 0x0a
    PRC_KNOB_ID_21 = 0x21
    PRC_KNOB_ID_22 = 0x22
class Gpu(NvidiaDevice):
    def __init__(self, dev_path):
        """Probe and identify the GPU at dev_path.

        Raises BrokenGpuError when config space or BAR0 looks dead, and
        UnknownGpuError when NV_PMC_BOOT_0 is not in the GPU map.
        """
        self.name = "?"
        self.bar0_addr = 0
        super(Gpu, self).__init__(dev_path)
        if not self.sanity_check_cfg_space():
            debug("%s sanity check of config space failed", self)
            raise BrokenGpuError()
        # Enable MMIO
        self.set_command_memory(True)
        if self.has_pm():
            # Reads below require the device to be in D0.
            if self.pmctrl["STATE"] != 0:
                warning("%s not in D0 (current state %d), forcing it to D0", self, self.pmctrl["STATE"])
                self.pmctrl["STATE"] = 0
        self.bar0_addr = self.bars[0][0]
        self.bar0_size = GPU_BAR0_SIZE
        self.bar1_addr = self.bars[1][0]
        self.bar0 = self._map_bar(0)
        # Map just a small part of BAR1 as we don't need it all
        self.bar1 = self._map_bar(1, 1024 * 1024)
        self.pmcBoot0 = self.read(NV_PMC_BOOT_0)
        if self.pmcBoot0 == 0xffffffff:
            debug("%s sanity check of bar0 failed", self)
            raise BrokenGpuError()
        # Identify the chip by NV_PMC_BOOT_0, disambiguating by PCI device id
        # when one boot value maps to multiple products.
        gpu_map_key = self.pmcBoot0
        if gpu_map_key in GPU_MAP_MULTIPLE:
            match = GPU_MAP_MULTIPLE[self.pmcBoot0]
            # Check for a device id match. Fall back to the default, if not found.
            gpu_map_key = GPU_MAP_MULTIPLE[self.pmcBoot0]["devids"].get(self.device, match["default"])
        if gpu_map_key not in GPU_MAP:
            # Dump a few identifying registers to help triage unknown chips.
            for off in [0x0, 0x88000, 0x88004]:
                debug("%s offset 0x%x = 0x%x", self.bdf, off, self.read(off))
            raise UnknownGpuError("GPU %s %s bar0 %s" % (self.bdf, hex(self.pmcBoot0), hex(self.bar0_addr)))
        self.gpu_props = GPU_MAP[gpu_map_key]
        gpu_props = self.gpu_props
        self.props = gpu_props
        self.name = gpu_props["name"]
        self.arch = gpu_props["arch"]
        # Per-product feature flags.
        self.is_pmu_reset_in_pmc = gpu_props["pmu_reset_in_pmc"]
        self.is_memory_clear_supported = gpu_props["memory_clear_supported"]
        # Querying ECC state relies on being able to initialize/clear memory
        self.is_ecc_query_supported = self.is_memory_clear_supported
        self.is_cc_query_supported = self.is_hopper_plus
        self.is_forcing_ecc_on_after_reset_supported = gpu_props["forcing_ecc_on_after_reset_supported"]
        self.is_setting_ecc_after_reset_supported = self.is_ampere_plus
        self.is_mig_mode_supported = self.is_ampere_100
        if not self.sanity_check():
            debug("%s sanity check failed", self)
            raise BrokenGpuError()
        # Priv ring must be initialized before many register ranges work.
        self.init_priv_ring()
        self.bar0_window_base = 0
        self.bar0_window_initialized = False
        self.bios = None
        self.npus = None
        self.npu_dma_initialized = False
        self.npus_cfg = gpu_props.get("npus_cfg", {})
        self.needs_npus_cfg = gpu_props.get("needs_npus_cfg", {})
        if self.is_ampere_plus:
            # PMC_DEVICE_ENABLE bits controlling the graphics engines.
            graphics_mask = 0
            graphics_bits = [12]
            if self.is_ampere_100:
                graphics_bits += [1, 9, 10, 11, 13, 14, 18]
            for gb in graphics_bits:
                graphics_mask |= (0x1 << gb)
            self.pmc_device_graphics_mask = graphics_mask
        self.hulk_ucode_data = None
        self.common_init()
def init_npus(self):
if self.npus is not None:
return
self.npus = []
gpu_props = self.gpu_props
if "fsp" in gpu_props["other_npus"]:
self.fsp = FspFalcon(self)
self.npus.append(self.fsp)
@property
def is_maxwell_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("maxwell")
@property
def is_pascal(self):
return GPU_ARCHES.index(self.arch) == GPU_ARCHES.index("pascal")
@property
def is_pascal_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("pascal")
@property
def is_pascal_10x_plus(self):
return self.is_pascal_plus and self.name != "P100"
@property
def is_pascal_10x(self):
return self.is_pascal and self.name != "P100"
@property
def is_volta(self):
return GPU_ARCHES.index(self.arch) == GPU_ARCHES.index("volta")
@property
def is_volta_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("volta")
@property
def is_turing(self):
return GPU_ARCHES.index(self.arch) == GPU_ARCHES.index("turing")
@property
def is_turing_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("turing")
@property
def is_ampere(self):
return GPU_ARCHES.index(self.arch) == GPU_ARCHES.index("ampere")
@property
def is_ampere_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("ampere")
@property
def is_ampere_100(self):
return self.name in ["A100", "A30"]
@property
def is_ampere_10x(self):
return self.is_ampere and not self.is_ampere_100
@property
def is_ampere_10x_plus(self):
return self.is_ampere_plus and not self.is_ampere_100
@property
def is_hopper(self):
return GPU_ARCHES.index(self.arch) == GPU_ARCHES.index("hopper")
@property
def is_hopper_plus(self):
return GPU_ARCHES.index(self.arch) >= GPU_ARCHES.index("hopper")
@property
def is_hopper_100(self):
return self.name in ["H100-PCIE", "H100-SXM"]
@property
def has_fsp(self):
return self.is_hopper_plus
def is_gpu(self):
return True
@property
def is_module_name_supported(self):
return self.name == "H100-SXM"
@property
def module_name(self):
if self._mod_name != None:
return self._mod_name
self._mod_name = f"SXM_{self.read_module_id() + 1}"
return self._mod_name
def vbios_scratch_register(self, index):
if self.is_turing_plus:
return 0x1400 + index * 4
else:
return 0x1580 + index * 4
def load_vbios(self):
if self.bios:
return
self._load_bios()
    def reload_vbios(self):
        # Force a re-read of the VBIOS even if an image is already cached.
        self._load_bios()
def query_cc_mode(self):
assert self.is_cc_query_supported
self.wait_for_boot()
cc_reg = self.read(0x1182cc)
cc_state = cc_reg & 0x3
if cc_state == 0x3:
return "devtools"
elif cc_state == 0x1:
return "on"
elif cc_state == 0x0:
return "off"
raise GpuError(f"Unexpected CC state 0x{cc_reg}")
    def set_cc_mode(self, mode):
        """Program Confidential Computing mode via FSP PRC knobs.

        mode is one of "off", "on", "devtools"; raises ValueError otherwise.
        NOTE(review): the knob write ordering below (clearing test knobs
        before enabling CC, decoupler before CCD before CCM) appears
        intentional — preserve it.
        """
        assert self.is_cc_query_supported
        cc_mode = 0x0
        cc_dev_mode = 0x0
        bar0_decoupler_val = 0x0
        if mode == "on":
            cc_mode = 0x1
            bar0_decoupler_val = 0x1
        elif mode == "devtools":
            # Devtools mode keeps CC enabled but without the BAR0 decoupler.
            cc_mode = 0x1
            cc_dev_mode = 0x1
        elif mode == "off":
            pass
        else:
            raise ValueError(f"Invalid mode {mode}")
        self._init_fsp_rpc()
        if cc_mode == 0x1:
            # Clear conflicting knobs before enabling CC.
            self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_02.value, 0x0)
            self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_04.value, 0x0)
            self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_22.value, 0x0)
        self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_BAR0_DECOUPLER.value, bar0_decoupler_val)
        self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_CCD.value, cc_dev_mode)
        self.fsp_rpc.prc_knob_check_and_write(PrcKnob.PRC_KNOB_ID_CCM.value, cc_mode)
def query_cc_settings(self):
    """Read the low-level CC knobs from the FSP.

    Returns:
        List of (setting name, knob value) pairs; these are the values
        that take effect on the next GPU reset.
    """
    assert self.is_cc_query_supported
    self._init_fsp_rpc()
    knobs = (
        ("enable", PrcKnob.PRC_KNOB_ID_CCM.value),
        ("enable-devtools", PrcKnob.PRC_KNOB_ID_CCD.value),
        ("enable-allow-inband-control", PrcKnob.PRC_KNOB_ID_CCM_ALLOW_INB.value),
        ("enable-devtools-allow-inband-control", PrcKnob.PRC_KNOB_ID_CCD_ALLOW_INB.value),
    )
    return [(name, self.fsp_rpc.prc_knob_read(knob_id)) for name, knob_id in knobs]
def query_prc_knobs(self):
    """Dump every known PRC knob as a list of (knob name, value) pairs."""
    assert self.has_fsp
    self._init_fsp_rpc()
    return [(knob.name, self.fsp_rpc.prc_knob_read(knob.value)) for knob in PrcKnob]
def wait_for_boot(self):
    """Block until the GPU firmware reports boot completion.

    Polls a generation-specific boot status register with a timeout of 5
    (units per poll_register — presumably seconds; confirm). On Hopper+
    a timeout additionally dumps a few debug scratch words before
    re-raising the poll error.
    """
    assert self.is_turing_plus
    if self.is_hopper_plus:
        try:
            self.poll_register("boot_complete", 0x200bc, 0xff, 5)
        except GpuError as err:
            _, _, tb = sys.exc_info()
            # Log the raw boot status plus four debug words to help
            # pinpoint the boot stage that got stuck.
            debug("{} boot not done 0x{:x} = 0x{:x}".format(self, 0x200bc, self.read(0x200bc)))
            for offset in range(0, 4*4, 4):
                debug_offset = 0x8f0320 + offset
                debug(" 0x{:x} = 0x{:x}".format(debug_offset, self.read(debug_offset)))
            traceback.print_tb(tb)
            raise
    else:
        # Pre-Hopper parts use a different status register and bit mask.
        self.poll_register("boot_complete", 0x118234, 0x3ff, 5)
def _is_read_good(self, reg, data):
    """A faulted read comes back as 0xbadfXXXX; anything else is good data."""
    return (data >> 16) != 0xbadf
def read_bad_ok(self, reg):
    """Read a BAR0 register, returning the raw value even if it is a fault code."""
    return self.bar0.read32(reg)
def check_read(self, reg):
    """Read a BAR0 register and report whether the value looks valid."""
    value = self.bar0.read32(reg)
    return self._is_read_good(reg, value)
def read(self, reg):
    """Read a BAR0 register, raising GpuError if the value is a fault code."""
    value = self.bar0.read32(reg)
    if self._is_read_good(reg, value):
        return value
    raise GpuError("gpu %s reg %s = %s, bad?" % (self, hex(reg), hex(value)))
def read_bar1(self, offset):
    """32-bit read through the BAR1 aperture."""
    return self.bar1.read32(offset)
def write_bar1(self, offset, data):
    """32-bit write through the BAR1 aperture."""
    return self.bar1.write32(offset, data)
# Init priv ring (internal bus)
def init_priv_ring(self):
    """Kick the privileged register ring (the GPU-internal bus) into life."""
    # NOTE(review): magic command values written to priv-ring control
    # registers; exact semantics are not visible here. The write order is
    # deliberate — do not reorder.
    self.write(0x12004c, 0x4)
    self.write(0x122204, 0x2)
def flr_resettable_scratch(self):
    """Scratch register whose contents survive a function-level reset (FLR)."""
    scratch_index = 22 if self.is_volta_plus else 15
    return self.vbios_scratch_register(scratch_index)
def sbr_resettable_scratch(self):
    """Scratch register whose contents survive a secondary-bus reset (SBR)."""
    if self.is_hopper_plus:
        return 0x91288
    elif self.is_ampere_plus:
        return 0x88e10
    else:
        # Older parts have no SBR-specific scratch; fall back to the
        # FLR-resettable one.
        return self.flr_resettable_scratch()
def __str__(self):
    """Human-readable identity: BDF, marketing name, device id, BAR0 base."""
    return "GPU %s %s %s BAR0 0x%x" % (self.bdf, self.name, hex(self.device), self.bar0_addr)
def __eq__(self, other):
    """GPUs are considered equal when they share the same BAR0 base address."""
    # Fix: comparing against an object without bar0_addr used to raise
    # AttributeError; returning NotImplemented lets Python fall back to its
    # default comparison for foreign types.
    if not hasattr(other, "bar0_addr"):
        return NotImplemented
    return self.bar0_addr == other.bar0_addr
def print_topo_indent(root, indent):
    """Recursively print the device tree, one indent level per depth.

    Hidden devices are not printed; their children are promoted one
    level up so the visible hierarchy stays contiguous.
    """
    if not root.is_hidden():
        print(" " * indent, root)
    else:
        indent -= 1
    for child in root.children:
        print_topo_indent(child, indent + 1)
def print_topo():
    """Print the PCI topology of every discovered device, starting at roots."""
    print("Topo:")
    for dev in DEVICES.values():
        if dev.is_root():
            print_topo_indent(dev, 1)
    sys.stdout.flush()
def create_args():
    """Build the optparse parser for the CLI.

    Shared by main() (real command line) and init() (library import with
    all defaults).
    """
    parser = optparse.OptionParser(usage="usage: %prog [options]")
    parser.add_option("--gpu", type="int", default=-1)
    parser.add_option("--gpu-bdf", help="Select a single GPU by providing a substring of the BDF, e.g. '01:00'.")
    parser.add_option("--gpu-name", help="Select a single GPU by providing a substring of the GPU name, e.g. 'T4'. If multiple GPUs match, the first one will be used.")
    parser.add_option("--no-gpu", action='store_true', help="Do not use any of the GPUs; commands requiring one will not work.")
    parser.add_option("--log", type="choice", choices=['debug', 'info', 'warning', 'error', 'critical'], default='info')
    parser.add_option("--reset-with-os", action='store_true', default=False,
            help="Reset with OS through /sys/.../reset")
    parser.add_option("--query-cc-mode", action='store_true', default=False,
            help="Query the current Confidential Computing (CC) mode of the GPU.")
    parser.add_option("--query-cc-settings", action='store_true', default=False,
            help="Query the Confidential Computing (CC) settings of the GPU."
            "This prints the lower level setting knobs that will take effect upon GPU reset.")
    # Fix: corrected user-facing typo "Confidentail" -> "Confidential".
    parser.add_option("--set-cc-mode", type='choice', choices=["off", "on", "devtools"],
            help="Configure Confidential Computing (CC) mode. The choices are off (disabled), on (enabled) or devtools (enabled in DevTools mode)."
            "The GPU needs to be reset to make the selected mode active. See --reset-after-cc-mode-switch for one way of doing it.")
    parser.add_option("--reset-after-cc-mode-switch", action='store_true', default=False,
            help="Reset the GPU after switching CC mode such that it is activated immediately.")
    return parser
# Called instead of main() when imported as a library rather than run as a
# command.
def init():
    """Populate the module-level opts with all option defaults."""
    global opts
    parser = create_args()
    opts, _ = parser.parse_args([])
def main():
    """CLI entry point: select one GPU and run the requested CC operations."""
    # Replace 5678 with the desired port number
    #debugpy.listen(('localhost', 5678))
    #print("Waiting for debugger to attach...")
    #debugpy.wait_for_client()
    print("NVIDIA GPU Tools version {0}".format(VERSION))
    sys.stdout.flush()
    global opts
    argp = create_args()
    (opts, args) = argp.parse_args()
    if len(args) != 0:
        print("ERROR: Exactly zero positional argument expected.")
        argp.print_usage()
        sys.exit(1)
    logging.basicConfig(level=getattr(logging, opts.log.upper()),
                        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d,%H:%M:%S')
    # GPU selection: an explicit BDF or name substring picks exactly one GPU;
    # otherwise list everything and use the numeric --gpu index.
    if opts.gpu_bdf is not None:
        gpus, other = find_gpus(opts.gpu_bdf)
        if len(gpus) == 0:
            error("Matching for {0} found nothing".format(opts.gpu_bdf))
            sys.exit(1)
        elif len(gpus) > 1:
            error("Matching for {0} found more than one GPU {1}".format(opts.gpu_bdf, ", ".join([str(g) for g in gpus])))
            sys.exit(1)
        else:
            gpu = gpus[0]
    elif opts.gpu_name is not None:
        gpus, other = find_gpus()
        gpus = [g for g in gpus if opts.gpu_name in g.name]
        if len(gpus) == 0:
            error("Matching for {0} found nothing".format(opts.gpu_name))
            sys.exit(1)
        gpu = gpus[0]
    else:
        gpus, other = find_gpus()
        print("GPUs:")
        for i, g in enumerate(gpus):
            print(" ", i, g)
        print("Other:")
        for i, o in enumerate(other):
            print(" ", i, o)
        sys.stdout.flush()
        if opts.gpu == -1:
            info("No GPU specified, select GPU with --gpu, --gpu-bdf, or --gpu-name")
            return 0
        if opts.gpu >= len(gpus):
            raise ValueError("GPU index out of bounds")
        gpu = gpus[opts.gpu]
    if gpu:
        print_topo()
        info("Selected %s", gpu)
        # Warn up front when CC is active: some register access paths are
        # blocked in that state.
        if gpu.is_gpu() and gpu.is_hopper_plus:
            cc_mode = gpu.query_cc_mode()
            if cc_mode != "off":
                warning(f"{gpu} has CC mode {cc_mode}, some functionality may not work")
    if opts.reset_with_os:
        # NOTE(review): this calls gpu.sysfs_reset() while the
        # --reset-after-cc-mode-switch path below calls gpu.reset_with_os();
        # confirm both methods exist and which is canonical.
        gpu.sysfs_reset()
    if opts.query_cc_settings:
        if not gpu.is_gpu() or not gpu.is_cc_query_supported:
            error(f"Querying CC settings is not supported on {gpu}")
            sys.exit(1)
        cc_settings = gpu.query_cc_settings()
        info(f"{gpu} CC settings:")
        for name, value in cc_settings:
            info(f" {name} = {value}")
    if opts.set_cc_mode:
        if not gpu.is_gpu() or not gpu.is_cc_query_supported:
            error(f"Configuring CC not supported on {gpu}")
            sys.exit(1)
        gpu.set_cc_mode(opts.set_cc_mode)
        info(f"{gpu} CC mode set to {opts.set_cc_mode}. It will be active after GPU reset.")
        if opts.reset_after_cc_mode_switch:
            gpu.reset_with_os()
            new_mode = gpu.query_cc_mode()
            if new_mode != opts.set_cc_mode:
                raise GpuError(f"{gpu} failed to switch to CC mode {opts.set_cc_mode}, current mode is {new_mode}.")
            info(f"{gpu} was reset to apply the new CC mode.")
    if opts.query_cc_mode:
        if not gpu.is_gpu() or not gpu.is_cc_query_supported:
            error(f"Querying CC mode is not supported on {gpu}")
            sys.exit(1)
        cc_mode = gpu.query_cc_mode()
        info(f"{gpu} CC mode is {cc_mode}")
# Run as a script -> full CLI; imported as a library -> only set up defaults.
if __name__ == "__main__":
    main()
else:
    init()
|
nvtrust-main
|
host_tools/python/gpu_cc_tool.py
|
import numpy as np
import matplotlib.pyplot as plt

# Load the saved spin lattice and display it as a heat map.
final_lattice = np.loadtxt("final.txt", dtype=np.int32)
plt.imshow(final_lattice)
plt.title('Final Lattice Configuration')
plt.colorbar()
plt.show()
|
ising-gpu-master
|
basic_cuda/plot_ising.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import math
import sys
import time
import cupy.cuda.curand as curand
from mpi4py import MPI
from numba import cuda
from numba import vectorize
import numpy as np
# Set constants
TCRIT = 2.26918531421 # critical temperature, 2 / ln(1 + sqrt(2))
# Setup MPI and get neighbor ranks. The lattice is decomposed into
# horizontal slabs, one per rank, with periodic wrap-around in the rank
# dimension.
comm = MPI.COMM_WORLD
rank = comm.rank
rank_up = comm.rank - 1 if (comm.rank - 1 >= 0) else comm.size - 1
rank_down = comm.rank + 1 if (comm.rank + 1 < comm.size) else 0
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--lattice-n", '-x', type=int, default=40*128, help="number of lattice rows")
parser.add_argument("--lattice-m", '-y', type=int, default=40*128, help="number of lattice columns")
parser.add_argument("--nwarmup", '-w', type=int, default=100, help="number of warmup iterations")
parser.add_argument("--niters", '-n', type=int, default=1000, help="number of trial iterations")
parser.add_argument("--alpha", '-a', type=float, default=0.1, help="coefficient of critical temperature")
parser.add_argument("--seed", '-s', type=int, default=1234, help="seed for random number generation")
parser.add_argument("--write-lattice", '-o', action='store_true', help="write final lattice configuration to file/s")
parser.add_argument("--use-common-seed", '-c', action='store_true', help="Use common seed for all ranks + updating offset. " +
                                                                         "Yields consistent results independent of number " +
                                                                         "of GPUs but is slower.")
args = parser.parse_args()
# Check arguments: the checkerboard split requires even column count and an
# even slab height per rank.
if args.lattice_m % 2 != 0:
    raise Exception("lattice_m must be an even value. Aborting.")
if args.lattice_n % comm.size != 0:
    raise Exception("lattice_n must be evenly divisible by number of GPUs. Aborting.")
if (args.lattice_n / comm.size) % 2 != 0:
    raise Exception("Slab width (lattice_n / nGPUs) must be an even value. Aborting.")
# Compute slab width (rows handled by each rank)
lattice_slab_n = args.lattice_n // comm.size
# Inverse temperature beta = 1 / (alpha * Tc)
inv_temp = (1.0) / (args.alpha * TCRIT)
# Generate lattice with random spins with shape of randval array
@vectorize(['int8(float32)'], target='cuda')
def generate_lattice(randval):
    # Map each uniform random value to a spin of +1 or -1.
    return 1 if randval > 0.5 else -1
@cuda.jit
def update_lattice_multi(lattice, op_lattice, op_lattice_up, op_lattice_down, randvals, is_black):
    """Metropolis half-sweep over one checkerboard color of this rank's slab.

    lattice: sites of the color being updated (modified in place).
    op_lattice: opposite-color sites of the same slab.
    op_lattice_up / op_lattice_down: opposite-color slabs of the
        neighboring ranks, read for the boundary rows.
    randvals: pre-generated uniform randoms, one per site.
    is_black: which color is updated; flips the off-column choice.
    """
    n,m = lattice.shape
    # One thread per site; map the linear thread id to (i, j).
    tid = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    j = tid % m
    i = tid // m
    if (i >= n or j >= m): return
    # Set stencil indices with periodicity
    jpp = (j + 1) if (j + 1) < m else 0
    jnn = (j - 1) if (j - 1) >= 0 else (m - 1)
    # Select off-column index based on color and row index parity
    if (is_black):
        joff = jpp if (i % 2) else jnn
    else:
        joff = jnn if (i % 2) else jpp
    # Compute sum of nearest neighbor spins (taking values from neighboring
    # lattice slabs if required)
    nn_sum = op_lattice[i, j] + op_lattice[i, joff]
    nn_sum += op_lattice[i - 1, j] if (i - 1) >= 0 else op_lattice_up[n - 1, j]
    nn_sum += op_lattice[i + 1, j] if (i + 1) < n else op_lattice_down[0, j]
    # Determine whether to flip spin: Metropolis acceptance with
    # probability exp(-2 * beta * spin * neighbor_sum).
    lij = lattice[i, j]
    acceptance_ratio = math.exp(-2.0 * inv_temp * nn_sum * lij)
    if (randvals[i, j] < acceptance_ratio):
        lattice[i, j] = -lij
# Create lattice update kernel (for single GPU case, this version with fewer arguments
# is a bit faster due to launch overhead introduced by numba)
@cuda.jit
def update_lattice(lattice, op_lattice, randvals, is_black):
    """Single-GPU Metropolis half-sweep over one checkerboard color.

    Same algorithm as update_lattice_multi but with fully periodic row
    indices since the whole lattice lives on one device.
    """
    n,m = lattice.shape
    tid = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
    i = tid // m
    j = tid % m
    if (i >= n or j >= m): return
    # Set stencil indices with periodicity
    ipp = (i + 1) if (i + 1) < n else 0
    jpp = (j + 1) if (j + 1) < m else 0
    inn = (i - 1) if (i - 1) >= 0 else (n - 1)
    jnn = (j - 1) if (j - 1) >= 0 else (m - 1)
    # Select off-column index based on color and row index parity
    if (is_black):
        joff = jpp if (i % 2) else jnn
    else:
        joff = jnn if (i % 2) else jpp
    # Compute sum of nearest neighbor spins
    nn_sum = op_lattice[inn, j] + op_lattice[i, j] + op_lattice[ipp, j] + op_lattice[i, joff]
    # Determine whether to flip spin: Metropolis acceptance with
    # probability exp(-2 * beta * spin * neighbor_sum).
    lij = lattice[i, j]
    acceptance_ratio = math.exp(-2.0 * inv_temp * nn_sum * lij)
    if (randvals[i, j] < acceptance_ratio):
        lattice[i, j] = -lij
# Write lattice configuration to file
def write_lattice(prefix, lattice_b, lattice_w):
    """Interleave the black/white device sub-lattices and save this rank's slab.

    Checkerboard storage keeps each color in an (n, m/2) array; the full
    row is rebuilt by alternating columns, with the phase switching on
    odd rows. Output goes to "<prefix>_rank<rank>.txt".
    """
    lattice_b_h = lattice_b.copy_to_host()
    lattice_w_h = lattice_w.copy_to_host()
    lattice = np.zeros((lattice_slab_n, args.lattice_m), dtype=np.int8)
    for i in range(lattice_slab_n):
        for j in range(args.lattice_m // 2):
            if (i % 2):
                lattice[i, 2*j+1] = lattice_b_h[i, j]
                lattice[i, 2*j] = lattice_w_h[i, j]
            else:
                lattice[i, 2*j] = lattice_b_h[i, j]
                lattice[i, 2*j+1] = lattice_w_h[i, j]
    print("Writing lattice to {}_rank{}.txt...".format(prefix, rank))
    np.savetxt("{}_rank{}.txt".format(prefix, rank), lattice, fmt='%d')
# Helper class for random number generation
class curandUniformRNG:
    """Thin wrapper around a cuRAND Philox generator that fills device arrays.

    With --use-common-seed all ranks share one seed and instead skip ahead
    by a per-rank offset, making the random streams (and hence results)
    independent of the number of GPUs at the cost of extra offset
    bookkeeping on every fill.
    """
    def __init__(self, seed=0):
        rng = curand.createGenerator(curand.CURAND_RNG_PSEUDO_PHILOX4_32_10)
        curand.setPseudoRandomGeneratorSeed(rng, seed)
        if (args.use_common_seed):
            # Skip to this rank's slice of the common random stream.
            self.offset = rank * lattice_slab_n * args.lattice_m // 2
            curand.setGeneratorOffset(rng, self.offset)
        self._rng = rng
    def fill_random(self, arr):
        """Fill a device array (exposing __cuda_array_interface__) with uniforms."""
        ptr = arr.__cuda_array_interface__['data'][0]
        curand.generateUniform(self._rng, ptr, arr.size)
        if (args.use_common_seed):
            # Advance past the randoms consumed by ALL ranks this step so
            # every rank's stream stays aligned regardless of GPU count.
            self.offset += args.lattice_n * args.lattice_m // 2
            curand.setGeneratorOffset(self._rng, self.offset)
# Helper function to perform device sync plus MPI barrier
def sync():
    """Drain this rank's GPU work, then barrier across all ranks."""
    cuda.synchronize()
    comm.barrier()
def update(lattices_b, lattices_w, randvals, rng):
    """One full lattice sweep: update all black sites, then all white sites.

    In the multi-GPU case each color update reads the neighbor ranks'
    opposite-color slabs, so a device sync + barrier is required between
    the two half-sweeps; the single-GPU path skips that overhead.
    """
    # Setup CUDA launch configuration: one thread per site of one color.
    threads = 128
    blocks = (args.lattice_m // 2 * lattice_slab_n + threads - 1) // threads
    if (comm.size > 1):
        # Update black
        rng.fill_random(randvals)
        update_lattice_multi[blocks, threads](lattices_b[rank], lattices_w[rank], lattices_w[rank_up], lattices_w[rank_down], randvals, True)
        sync()
        # Update white
        rng.fill_random(randvals)
        update_lattice_multi[blocks, threads](lattices_w[rank], lattices_b[rank], lattices_b[rank_up], lattices_b[rank_down], randvals, False)
        sync()
    else:
        # Update black
        rng.fill_random(randvals)
        update_lattice[blocks, threads](lattices_b[rank], lattices_w[rank], randvals, True)
        # Update white
        rng.fill_random(randvals)
        update_lattice[blocks, threads](lattices_w[rank], lattices_b[rank], randvals, False)
# Set device: one MPI rank drives one GPU.
cuda.select_device(rank)
# Setup cuRAND generator (per-rank seed unless a common seed was requested)
rng = curandUniformRNG(seed=args.seed if args.use_common_seed else args.seed + 42 * rank)
randvals = cuda.device_array((lattice_slab_n, args.lattice_m // 2), dtype=np.float32)
# Setup black and white lattice arrays on device
rng.fill_random(randvals)
lattice_b = generate_lattice(randvals)
rng.fill_random(randvals)
lattice_w = generate_lattice(randvals)
# Setup/open CUDA IPC handles so every rank can read its neighbors' slabs
# directly (peer access), avoiding explicit halo exchanges.
ipch_b = comm.allgather(lattice_b.get_ipc_handle())
ipch_w = comm.allgather(lattice_w.get_ipc_handle())
lattices_b = [x.open() if i != rank else lattice_b for i,x in enumerate(ipch_b)]
lattices_w = [x.open() if i != rank else lattice_w for i,x in enumerate(ipch_w)]
# Warmup iterations (not timed)
if rank == 0:
    print("Starting warmup...")
    sys.stdout.flush()
sync()
for i in range(args.nwarmup):
    update(lattices_b, lattices_w, randvals, rng)
sync()
# Trial iterations (timed)
if rank == 0:
    print("Starting trial iterations...")
    sys.stdout.flush()
t0 = time.time()
for i in range(args.niters):
    update(lattices_b, lattices_w, randvals, rng)
    if (rank == 0 and i % 1000 == 0):
        print("Completed {}/{} iterations...".format(i+1, args.niters))
        sys.stdout.flush()
sync()
t1 = time.time()
t = t1 - t0
# Compute average magnetism: per-rank slab sums, reduced across ranks and
# normalized by the total site count.
m = (np.sum(lattices_b[rank], dtype=np.int64) + np.sum(lattices_w[rank], dtype=np.int64)) / float(args.lattice_n * args.lattice_m)
m_global = comm.allreduce(m, MPI.SUM)
if (rank == 0):
    print("REPORT:")
    print("\tnGPUs: {}".format(comm.size))
    print("\ttemperature: {} * {}".format(args.alpha, TCRIT))
    print("\tseed: {}".format(args.seed))
    print("\twarmup iterations: {}".format(args.nwarmup))
    print("\ttrial iterations: {}".format(args.niters))
    print("\tlattice dimensions: {} x {}".format(args.lattice_n, args.lattice_m))
    print("\telapsed time: {} sec".format(t))
    print("\tupdates per ns: {}".format((args.lattice_n * args.lattice_m * args.niters) / t * 1e-9))
    print("\taverage magnetism (absolute): {}".format(np.abs(m_global)))
    sys.stdout.flush()
sync()
if (args.write_lattice):
    write_lattice("final", lattices_b[rank], lattices_w[rank])
|
ising-gpu-master
|
basic_python/ising_basic.py
|
import glob
import matplotlib.pyplot as plt
import numpy as np

# Stitch the per-rank lattice slabs back together, top to bottom, and plot.
files = sorted(glob.glob("final_rank*.txt"))
if not files:
    raise Exception("Could not find any lattice files. Expecting files named 'final_rank*.txt' for processing")
slabs = [np.loadtxt(f, dtype=np.int32) for f in files]
lattice = np.concatenate(slabs) if len(slabs) > 1 else slabs[0]
plt.imshow(lattice)
plt.title('Final Lattice Configuration')
plt.colorbar()
plt.show()
|
ising-gpu-master
|
basic_python/plot_ising_multi.py
|
import numpy as np
import matplotlib.pyplot as plt

# Read the final lattice dump and render it as a heat map.
spins = np.loadtxt("final.txt", dtype=np.int32)
plt.imshow(spins)
plt.title('Final Lattice Configuration')
plt.colorbar()
plt.show()
|
ising-gpu-master
|
tensorcore/plot_ising.py
|
#!/usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt

# Read a lattice dump (one row of spin digits per line) and save it as a PNG
# next to the input file.
data = []
# Fix: the original used a Python 2 print statement (would not even parse
# under Python 3) and never closed the input file; use print() and a
# context manager.
with open(sys.argv[1]) as f:
    for l in f:
        data.append([int(c) for c in l.strip(" \n\r")])
print(len(data), 'x', len(data[0]))
plt.imshow(data, interpolation='nearest')
outFile = sys.argv[1]+".png"
plt.savefig(outFile)
|
ising-gpu-master
|
optimized/plotLattice.py
|
import sys
import warnings
import os
import glob
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import (
BuildExtension,
CppExtension,
CUDAExtension,
CUDA_HOME,
load,
)
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
    """Run `nvcc -V` from cuda_dir and return (raw output, parsed Version)."""
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    tokens = raw_output.split()
    # The release number is the token right after the word "release",
    # e.g. "... release 11.8, V11.8.89" -> "11.8,".
    release_token = tokens[tokens.index("release") + 1]
    bare_metal_version = parse(release_token.split(",")[0])
    return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    """Abort the build when the local nvcc version differs from the CUDA
    version the installed torch binaries were compiled against."""
    raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_version = parse(torch.version.cuda)
    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")
    # NOTE(review): exact-equality comparison — even a minor-version mismatch
    # trips this check, which is why the message mentions commenting it out.
    if (bare_metal_version != torch_binary_version):
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )
def raise_if_cuda_home_none(global_option: str) -> None:
    """Fail fast when a CUDA extension was requested but nvcc is unavailable."""
    if CUDA_HOME is None:
        raise RuntimeError(
            f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
            "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
            "only images whose names contain 'devel' will provide nvcc."
        )
def check_cudnn_version_and_warn(global_option: str, required_cudnn_version: int) -> bool:
    """Return True iff cuDNN is available and at least required_cudnn_version.

    When the requirement is not met a warning is emitted and False is
    returned, letting callers skip the extension instead of failing the
    whole build.
    """
    cudnn_available = torch.backends.cudnn.is_available()
    cudnn_version = torch.backends.cudnn.version() if cudnn_available else None
    if cudnn_available and cudnn_version >= required_cudnn_version:
        return True
    warnings.warn(
        f"Skip `{global_option}` as it requires cuDNN {required_cudnn_version} or later, "
        f"but {'cuDNN is not available' if not cudnn_available else cudnn_version}"
    )
    return False
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
elif bare_metal_version >= Version("11.1"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
elif bare_metal_version == Version("11.0"):
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError(
"Apex requires Pytorch 0.4 or newer.\nThe latest stable release can be obtained from https://pytorch.org/"
)
cmdclass = {}
ext_modules = []
extras = {}
if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
if TORCH_MAJOR == 0:
raise RuntimeError(
"--cpp_ext requires Pytorch 1.0 or later, " "found torch.__version__ = {}".format(torch.__version__)
)
if "--cpp_ext" in sys.argv:
sys.argv.remove("--cpp_ext")
ext_modules.append(CppExtension("apex_C", ["csrc/flatten_unflatten.cpp"]))
# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ["-DVERSION_GE_1_1"]
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ["-DVERSION_GE_1_3"]
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ["-DVERSION_GE_1_5"]
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if "--distributed_adam" in sys.argv:
sys.argv.remove("--distributed_adam")
raise_if_cuda_home_none("--distributed_adam")
ext_modules.append(
CUDAExtension(
name="distributed_adam_cuda",
sources=[
"apex/contrib/csrc/optimizers/multi_tensor_distopt_adam.cpp",
"apex/contrib/csrc/optimizers/multi_tensor_distopt_adam_kernel.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3", "--use_fast_math"] + version_dependent_macros,
},
)
)
if "--distributed_lamb" in sys.argv:
sys.argv.remove("--distributed_lamb")
raise_if_cuda_home_none("--distributed_lamb")
ext_modules.append(
CUDAExtension(
name="distributed_lamb_cuda",
sources=[
"apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp",
"apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb_kernel.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3", "--use_fast_math"] + version_dependent_macros,
},
)
)
if "--cuda_ext" in sys.argv:
sys.argv.remove("--cuda_ext")
raise_if_cuda_home_none("--cuda_ext")
check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)
ext_modules.append(
CUDAExtension(
name="amp_C",
sources=[
"csrc/amp_C_frontend.cpp",
"csrc/multi_tensor_sgd_kernel.cu",
"csrc/multi_tensor_scale_kernel.cu",
"csrc/multi_tensor_axpby_kernel.cu",
"csrc/multi_tensor_l2norm_kernel.cu",
"csrc/multi_tensor_l2norm_kernel_mp.cu",
"csrc/multi_tensor_l2norm_scale_kernel.cu",
"csrc/multi_tensor_lamb_stage_1.cu",
"csrc/multi_tensor_lamb_stage_2.cu",
"csrc/multi_tensor_adam.cu",
"csrc/multi_tensor_adagrad.cu",
"csrc/multi_tensor_novograd.cu",
"csrc/multi_tensor_lamb.cu",
"csrc/multi_tensor_lamb_mp.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-lineinfo",
"-O3",
# '--resource-usage',
"--use_fast_math",
] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="syncbn",
sources=["csrc/syncbn.cpp", "csrc/welford.cu"],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3"] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="fused_layer_norm_cuda",
sources=["csrc/layer_norm_cuda.cpp", "csrc/layer_norm_cuda_kernel.cu"],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-maxrregcount=50", "-O3", "--use_fast_math"] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="mlp_cuda",
sources=["csrc/mlp.cpp", "csrc/mlp_cuda.cu"],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3"] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="fused_dense_cuda",
sources=["csrc/fused_dense.cpp", "csrc/fused_dense_cuda.cu"],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3"] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="scaled_upper_triang_masked_softmax_cuda",
sources=[
"csrc/megatron/scaled_upper_triang_masked_softmax.cpp",
"csrc/megatron/scaled_upper_triang_masked_softmax_cuda.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="generic_scaled_masked_softmax_cuda",
sources=[
"csrc/megatron/generic_scaled_masked_softmax.cpp",
"csrc/megatron/generic_scaled_masked_softmax_cuda.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="scaled_masked_softmax_cuda",
sources=["csrc/megatron/scaled_masked_softmax.cpp", "csrc/megatron/scaled_masked_softmax_cuda.cu"],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
] + version_dependent_macros,
},
)
)
ext_modules.append(
CUDAExtension(
name="scaled_softmax_cuda",
sources=["csrc/megatron/scaled_softmax.cpp", "csrc/megatron/scaled_softmax_cuda.cu"],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
] + version_dependent_macros,
},
)
)
if bare_metal_version >= Version("11.0"):
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.1"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_86,code=sm_86")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="fused_weight_gradient_mlp_cuda",
include_dirs=[os.path.join(this_dir, "csrc")],
sources=[
"csrc/megatron/fused_weight_gradient_dense.cpp",
"csrc/megatron/fused_weight_gradient_dense_cuda.cu",
"csrc/megatron/fused_weight_gradient_dense_16bit_prec_cuda.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
] + version_dependent_macros + cc_flag,
},
)
)
if "--permutation_search" in sys.argv:
sys.argv.remove("--permutation_search")
if CUDA_HOME is None:
raise RuntimeError("--permutation_search was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
else:
cc_flag = ['-Xcompiler', '-fPIC', '-shared']
ext_modules.append(
CUDAExtension(name='permutation_search_cuda',
sources=['apex/contrib/sparsity/permutation_search_kernels/CUDA_kernels/permutation_search_kernels.cu'],
include_dirs=[os.path.join(this_dir, 'apex', 'contrib', 'sparsity', 'permutation_search_kernels', 'CUDA_kernels')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-O3'] + version_dependent_macros + cc_flag}))
if "--bnp" in sys.argv:
sys.argv.remove("--bnp")
raise_if_cuda_home_none("--bnp")
ext_modules.append(
CUDAExtension(
name="bnp",
sources=[
"apex/contrib/csrc/groupbn/batch_norm.cu",
"apex/contrib/csrc/groupbn/ipc.cu",
"apex/contrib/csrc/groupbn/interface.cpp",
"apex/contrib/csrc/groupbn/batch_norm_add_relu.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": [] + version_dependent_macros,
"nvcc": [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] + version_dependent_macros,
},
)
)
if "--xentropy" in sys.argv:
from datetime import datetime
sys.argv.remove("--xentropy")
raise_if_cuda_home_none("--xentropy")
xentropy_ver = datetime.today().strftime("%y.%m.%d")
print(f"`--xentropy` setting version of {xentropy_ver}")
ext_modules.append(
CUDAExtension(
name="xentropy_cuda",
sources=["apex/contrib/csrc/xentropy/interface.cpp", "apex/contrib/csrc/xentropy/xentropy_kernel.cu"],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros + [f'-DXENTROPY_VER="{xentropy_ver}"'],
"nvcc": ["-O3"] + version_dependent_macros,
},
)
)
if "--focal_loss" in sys.argv:
sys.argv.remove("--focal_loss")
raise_if_cuda_home_none("--focal_loss")
ext_modules.append(
CUDAExtension(
name='focal_loss_cuda',
sources=[
'apex/contrib/csrc/focal_loss/focal_loss_cuda.cpp',
'apex/contrib/csrc/focal_loss/focal_loss_cuda_kernel.cu',
],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={
'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-O3', '--use_fast_math', '--ftz=false'] + version_dependent_macros,
},
)
)
if "--group_norm" in sys.argv:
sys.argv.remove("--group_norm")
raise_if_cuda_home_none("--group_norm")
# CUDA group norm supports from SM70
arch_flags = []
for arch in [70, 75, 80, 86, 90]:
arch_flag = f"-gencode=arch=compute_{arch},code=sm_{arch}"
arch_flags.append(arch_flag)
arch_flag = f"-gencode=arch=compute_90,code=compute_90"
arch_flags.append(arch_flag)
ext_modules.append(
CUDAExtension(
name="group_norm_cuda",
sources=[
"apex/contrib/csrc/group_norm/group_norm_nhwc_op.cpp",
] + glob.glob("apex/contrib/csrc/group_norm/*.cu"),
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"] + version_dependent_macros,
"nvcc": [
"-O3", "-std=c++17", "--use_fast_math", "--ftz=false",
] + arch_flags + version_dependent_macros,
},
)
)
if "--index_mul_2d" in sys.argv:
sys.argv.remove("--index_mul_2d")
raise_if_cuda_home_none("--index_mul_2d")
ext_modules.append(
CUDAExtension(
name='fused_index_mul_2d',
sources=[
'apex/contrib/csrc/index_mul_2d/index_mul_2d_cuda.cpp',
'apex/contrib/csrc/index_mul_2d/index_mul_2d_cuda_kernel.cu',
],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={
'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-O3', '--use_fast_math', '--ftz=false'] + version_dependent_macros,
},
)
)
if "--deprecated_fused_adam" in sys.argv:
sys.argv.remove("--deprecated_fused_adam")
raise_if_cuda_home_none("--deprecated_fused_adam")
ext_modules.append(
CUDAExtension(
name="fused_adam_cuda",
sources=[
"apex/contrib/csrc/optimizers/fused_adam_cuda.cpp",
"apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3", "--use_fast_math"] + version_dependent_macros,
},
)
)
if "--deprecated_fused_lamb" in sys.argv:
sys.argv.remove("--deprecated_fused_lamb")
raise_if_cuda_home_none("--deprecated_fused_lamb")
ext_modules.append(
CUDAExtension(
name="fused_lamb_cuda",
sources=[
"apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp",
"apex/contrib/csrc/optimizers/fused_lamb_cuda_kernel.cu",
"csrc/multi_tensor_l2norm_kernel.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3", "--use_fast_math"] + version_dependent_macros,
},
)
)
# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
generator_flag = ["-DOLD_GENERATOR_PATH"]
if "--fast_layer_norm" in sys.argv:
sys.argv.remove("--fast_layer_norm")
raise_if_cuda_home_none("--fast_layer_norm")
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
if bare_metal_version >= Version("11.0"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="fast_layer_norm",
sources=[
"apex/contrib/csrc/layer_norm/ln_api.cpp",
"apex/contrib/csrc/layer_norm/ln_fwd_cuda_kernel.cu",
"apex/contrib/csrc/layer_norm/ln_bwd_semi_cuda_kernel.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros + generator_flag,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"-I./apex/contrib/csrc/layer_norm/",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
] + version_dependent_macros + generator_flag + cc_flag,
},
include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/layer_norm")],
)
)
if "--fmha" in sys.argv:
sys.argv.remove("--fmha")
raise_if_cuda_home_none("--fmha")
if bare_metal_version < Version("11.0"):
raise RuntimeError("--fmha only supported on sm_80 and sm_90 GPUs")
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
ext_modules.append(
CUDAExtension(
name="fmhalib",
sources=[
"apex/contrib/csrc/fmha/fmha_api.cpp",
"apex/contrib/csrc/fmha/src/fmha_fill.cu",
"apex/contrib/csrc/fmha/src/fmha_noloop_reduce.cu",
"apex/contrib/csrc/fmha/src/fmha_fprop_fp16_128_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_fprop_fp16_256_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_fprop_fp16_384_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_fprop_fp16_512_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_128_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_256_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_384_64_kernel.sm80.cu",
"apex/contrib/csrc/fmha/src/fmha_dgrad_fp16_512_64_kernel.sm80.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros + generator_flag,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
] + version_dependent_macros + generator_flag + cc_flag,
},
include_dirs=[
os.path.join(this_dir, "apex/contrib/csrc"),
os.path.join(this_dir, "apex/contrib/csrc/fmha/src"),
],
)
)
if "--fast_multihead_attn" in sys.argv:
sys.argv.remove("--fast_multihead_attn")
raise_if_cuda_home_none("--fast_multihead_attn")
cc_flag = []
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
if bare_metal_version >= Version("11.0"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.1"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_86,code=sm_86")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
ext_modules.append(
CUDAExtension(
name="fast_multihead_attn",
sources=[
"apex/contrib/csrc/multihead_attn/multihead_attn_frontend.cpp",
"apex/contrib/csrc/multihead_attn/additive_masked_softmax_dropout_cuda.cu",
"apex/contrib/csrc/multihead_attn/masked_softmax_dropout_cuda.cu",
"apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu",
"apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu",
"apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu",
"apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_additive_mask_cuda.cu",
"apex/contrib/csrc/multihead_attn/self_multihead_attn_bias_cuda.cu",
"apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros + generator_flag,
"nvcc": [
"-O3",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ version_dependent_macros
+ generator_flag
+ cc_flag,
},
include_dirs=[
os.path.join(this_dir, "apex/contrib/csrc/multihead_attn/cutlass/include/"),
os.path.join(this_dir, "apex/contrib/csrc/multihead_attn/cutlass/tools/util/include")
],
)
)
if "--transducer" in sys.argv:
sys.argv.remove("--transducer")
raise_if_cuda_home_none("--transducer")
ext_modules.append(
CUDAExtension(
name="transducer_joint_cuda",
sources=[
"apex/contrib/csrc/transducer/transducer_joint.cpp",
"apex/contrib/csrc/transducer/transducer_joint_kernel.cu",
],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros + generator_flag,
"nvcc": ["-O3"] + version_dependent_macros + generator_flag,
},
include_dirs=[os.path.join(this_dir, "csrc"), os.path.join(this_dir, "apex/contrib/csrc/multihead_attn")],
)
)
ext_modules.append(
CUDAExtension(
name="transducer_loss_cuda",
sources=[
"apex/contrib/csrc/transducer/transducer_loss.cpp",
"apex/contrib/csrc/transducer/transducer_loss_kernel.cu",
],
include_dirs=[os.path.join(this_dir, "csrc")],
extra_compile_args={
"cxx": ["-O3"] + version_dependent_macros,
"nvcc": ["-O3"] + version_dependent_macros,
},
)
)
if "--cudnn_gbn" in sys.argv:
sys.argv.remove("--cudnn_gbn")
raise_if_cuda_home_none("--cudnn_gbn")
if check_cudnn_version_and_warn("--cudnn_gbn", 8500):
subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
ext_modules.append(
CUDAExtension(
name="cudnn_gbn_lib",
sources=[
"apex/contrib/csrc/cudnn_gbn/norm_sample.cpp",
"apex/contrib/csrc/cudnn_gbn/cudnn_gbn.cpp",
],
include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
extra_compile_args={"cxx": ["-O3", "-g"] + version_dependent_macros + generator_flag},
)
)
if "--peer_memory" in sys.argv:
sys.argv.remove("--peer_memory")
raise_if_cuda_home_none("--peer_memory")
ext_modules.append(
CUDAExtension(
name="peer_memory_cuda",
sources=[
"apex/contrib/csrc/peer_memory/peer_memory_cuda.cu",
"apex/contrib/csrc/peer_memory/peer_memory.cpp",
],
extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
)
)
# NOTE: Requires NCCL >= 2.10.3
if "--nccl_p2p" in sys.argv:
sys.argv.remove("--nccl_p2p")
raise_if_cuda_home_none("--nccl_p2p")
# Check NCCL version.
_nccl_version_getter = load(
name="_nccl_version_getter",
sources=["apex/contrib/csrc/nccl_p2p/nccl_version.cpp", "apex/contrib/csrc/nccl_p2p/nccl_version_check.cu"],
)
_available_nccl_version = _nccl_version_getter.get_nccl_version()
if _available_nccl_version >= (2, 10):
ext_modules.append(
CUDAExtension(
name="nccl_p2p_cuda",
sources=[
"apex/contrib/csrc/nccl_p2p/nccl_p2p_cuda.cu",
"apex/contrib/csrc/nccl_p2p/nccl_p2p.cpp",
],
extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
)
)
else:
warnings.warn(
f"Skip `--nccl_p2p` as it requires NCCL 2.10.3 or later, but {_available_nccl_version[0]}.{_available_nccl_version[1]}"
)
# note (mkozuki): Now `--fast_bottleneck` option (i.e. apex/contrib/bottleneck) depends on `--peer_memory` and `--nccl_p2p`.
if "--fast_bottleneck" in sys.argv:
sys.argv.remove("--fast_bottleneck")
raise_if_cuda_home_none("--fast_bottleneck")
if check_cudnn_version_and_warn("--fast_bottleneck", 8400):
subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
ext_modules.append(
CUDAExtension(
name="fast_bottleneck",
sources=["apex/contrib/csrc/bottleneck/bottleneck.cpp"],
include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
)
)
if "--fused_conv_bias_relu" in sys.argv:
sys.argv.remove("--fused_conv_bias_relu")
raise_if_cuda_home_none("--fused_conv_bias_relu")
if check_cudnn_version_and_warn("--fused_conv_bias_relu", 8400):
subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/cudnn-frontend/"])
ext_modules.append(
CUDAExtension(
name="fused_conv_bias_relu",
sources=["apex/contrib/csrc/conv_bias_relu/conv_bias_relu.cpp"],
include_dirs=[os.path.join(this_dir, "apex/contrib/csrc/cudnn-frontend/include")],
extra_compile_args={"cxx": ["-O3"] + version_dependent_macros + generator_flag},
)
)
# Final package metadata. `ext_modules` was populated above according to the
# requested `--<option>` flags; with no flags this installs the pure-Python apex.
setup(
    name="apex",
    version="0.1",
    packages=find_packages(
        # FIX: "tests" appeared twice in the original exclude tuple; deduplicated.
        exclude=("build", "csrc", "include", "tests", "dist", "docs", "examples", "apex.egg-info",)
    ),
    install_requires=["packaging>20.6"],
    description="PyTorch Extensions written by NVIDIA",
    ext_modules=ext_modules,
    # BuildExtension is only needed when there is something to compile.
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
    extras_require=extras,
)
|
apex-master
|
setup.py
|
import logging
import warnings
# May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten
import torch
__all__ = ["amp", "fp16_utils", "optimizers", "normalization", "transformer"]
if torch.distributed.is_available():
from . import parallel
__all__.append("parallel")
from . import amp
from . import fp16_utils
# For optimizers and normalization there is no Python fallback.
# Absence of cuda backend is a hard error.
# I would like the errors from importing fused_adam_cuda or fused_layer_norm_cuda
# to be triggered lazily, because if someone has installed with --cpp_ext and --cuda_ext
# so they expect those backends to be available, but for some reason they actually aren't
# available (for example because they built improperly in a way that isn't revealed until
# load time) the error message is timely and visible.
from . import optimizers
from . import normalization
from . import transformer
# Logging utilities for apex.transformer module
class RankInfoFormatter(logging.Formatter):
    """logging.Formatter that injects ``%(rank_info)s`` — the parallel-state rank
    info from ``apex.transformer`` — into every log record before formatting."""
    def format(self, record):
        # Imported lazily: apex.transformer itself imports apex, so a module-level
        # import here would be circular.
        from apex.transformer.parallel_state import get_rank_info
        record.rank_info = get_rank_info()
        return super().format(record)
# Library root logger: each record carries the PID and the parallel rank info
# (via RankInfoFormatter) so multi-rank logs can be attributed to a process.
_library_root_logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(RankInfoFormatter("%(asctime)s - PID:%(process)d - rank:%(rank_info)s - %(filename)s:%(lineno)d - %(levelname)s - %(message)s", "%y-%m-%d %H:%M:%S"))
_library_root_logger.addHandler(handler)
# Keep apex log records out of the application's root logger.
_library_root_logger.propagate = False
def check_cudnn_version_and_warn(global_option: str, required_cudnn_version: int) -> bool:
    """Return ``True`` iff cuDNN is available and at least `required_cudnn_version`.

    When the requirement is not met, emit a warning naming the build option
    (`global_option`) that depends on it and return ``False``.
    """
    available = torch.backends.cudnn.is_available()
    version = torch.backends.cudnn.version() if available else None
    if available and version >= required_cudnn_version:
        return True
    warnings.warn(
        f"`{global_option}` depends on cuDNN {required_cudnn_version} or later, "
        f"but {'cuDNN is not available' if not available else version}"
    )
    return False
class DeprecatedFeatureWarning(FutureWarning):
    """Warning category used by apex for features scheduled for removal."""
    pass
def deprecated_warning(msg: str) -> None:
    """Emit `msg` as a :class:`DeprecatedFeatureWarning`, at most once per job.

    The warning is raised when torch.distributed is unusable/uninitialized
    (single-process runs) or on global rank 0 only, to avoid one warning per rank.
    """
    # BUGFIX: the original tested `not torch.distributed.is_available` — the bound
    # function object, which is always truthy — so that clause was always False.
    # The function must be *called* to ask whether the distributed package exists.
    if (
        not torch.distributed.is_available()
        or not torch.distributed.is_initialized()
        or (torch.distributed.is_initialized() and torch.distributed.get_rank() == 0)
    ):
        warnings.warn(msg, DeprecatedFeatureWarning)
|
apex-master
|
apex/__init__.py
|
from typing import Optional, Sequence
import torch
__all__ = ["_cast_if_autocast_enabled"]
def _get_autocast_dtypes() -> Sequence[torch.dtype]:
if torch.cuda.is_bf16_supported():
return [torch.half, torch.bfloat16]
return [torch.half]
def _get_current_dtype(dtype: Optional[torch.dtype] = None) -> torch.dtype:
if not torch.is_autocast_enabled():
return torch.float or dtype
else:
return torch.get_autocast_gpu_dtype()
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
|
apex-master
|
apex/_autocast_utils.py
|
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
import syncbn
from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction
class SyncBatchNorm(_BatchNorm):
    """
    synchronized batch normalization module extended from `torch.nn.BatchNormNd`
    with the added stats reduction across multiple processes.
    :class:`apex.parallel.SyncBatchNorm` is designed to work with
    `DistributedDataParallel`.
    When running in training mode, the layer reduces stats across all processes
    to increase the effective batchsize for normalization layer. This is useful
    in applications where batch size is small on a given process that would
    diminish converged accuracy of the model. The model uses collective
    communication package from `torch.distributed`.
    When running in evaluation mode, the layer falls back to
    `torch.nn.functional.batch_norm`
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``True``
        process_group: pass in a process group within which the stats of the
            mini-batch is being synchronized. ``None`` for using default process
            group
        channel_last: a boolean value that when set to ``True``, this module
            take the last dimension of the input tensor to be the channel
            dimension. Default: False
    Examples::
        >>> # channel first tensor
        >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()
        >>> inp = torch.randn(10, 100, 14, 14).cuda()
        >>> out = sbn(inp)
        >>> inp = torch.randn(3, 100, 20).cuda()
        >>> out = sbn(inp)
        >>> # channel last tensor
        >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda()
        >>> inp = torch.randn(10, 14, 14, 100).cuda()
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False, fuse_relu=False):
        super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
        self.process_group = process_group
        self.channel_last = channel_last
        self.fuse_relu = fuse_relu
    def _specify_process_group(self, process_group):
        # Late-bind the process group used for stats reduction.
        self.process_group = process_group
    def _specify_channel_last(self, channel_last):
        self.channel_last = channel_last
    def forward(self, input, z = None):
        # if input.dim() == 2, we switch to channel_last for efficient memory accessing
        channel_last = self.channel_last if input.dim() != 2 else True
        # FIX(idiom): compare `z` against None with `is` — `z` may be a tensor,
        # and identity comparison is the unambiguous (and cheaper) test.
        if not self.training and self.track_running_stats and not channel_last and not self.fuse_relu and z is None:
            # fall back to pytorch implementation for inference
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
        else:
            exponential_average_factor = 0.0
            if self.training and self.track_running_stats:
                self.num_batches_tracked += 1
                if self.momentum is None:
                    # momentum=None requests a cumulative (simple) moving average.
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:
                    exponential_average_factor = self.momentum
            return SyncBatchnormFunction.apply(input, z, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last, self.fuse_relu)
|
apex-master
|
apex/parallel/optimized_sync_batchnorm.py
|
import torch
from torch.autograd.function import Function
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
    """Autograd function implementing the Python-fallback synchronized batch norm.

    forward() normalizes with the (already cross-process-reduced) mean/variance it
    is given; backward() all-reduces the gradient statistics across `process_group`.
    """
    @staticmethod
    def forward(ctx, input, weight, bias, running_mean, running_variance, eps, process_group, world_size):
        torch.cuda.nvtx.range_push("sync_BN_fw")
        # transpose it to channel last to support broadcasting for input with different rank
        c_last_input = input.transpose(1, -1).contiguous().clone()
        ctx.save_for_backward(c_last_input, weight, bias,
                              running_mean, running_variance)
        ctx.eps = eps
        ctx.process_group = process_group
        ctx.world_size = world_size
        # y = (x - mean) / sqrt(var + eps), then optional affine transform.
        c_last_input = (c_last_input - running_mean) / \
            torch.sqrt(running_variance + eps)
        if weight is not None:
            c_last_input = c_last_input * weight
        if bias is not None:
            c_last_input = c_last_input + bias
        torch.cuda.nvtx.range_pop()
        # Transpose back to the caller's channel-first layout.
        return c_last_input.transpose(1, -1).contiguous().clone()
    @staticmethod
    def backward(ctx, grad_output):
        torch.cuda.nvtx.range_push("sync_BN_bw")
        # mini batch mean & var are calculated by forward path.
        # mu = 1./N*np.sum(h, axis = 0)
        # var = 1./N*np.sum((h-mu)**2, axis = 0)
        c_last_input, weight, bias, running_mean, running_variance = ctx.saved_tensors
        eps = ctx.eps
        process_group = ctx.process_group
        world_size = ctx.world_size
        grad_input = grad_weight = grad_bias = None
        num_features = running_mean.size()[0]
        # transpose it to channel last to support broadcasting for input with different rank
        torch.cuda.nvtx.range_push("carilli field")
        c_last_grad = grad_output.transpose(1, -1).contiguous()
        # squash non-channel dimension so we can easily calculate mean
        c_grad = c_last_grad.view(-1, num_features).contiguous()
        torch.cuda.nvtx.range_pop()
        # calculate grad_input
        if ctx.needs_input_grad[0]:
            # dh = gamma * (var + eps)**(-1. / 2.) * (dy - np.mean(dy, axis=0)
            #     - (h - mu) * (var + eps)**(-1.0) * np.mean(dy * (h - mu), axis=0))
            mean_dy = c_grad.mean(0)
            mean_dy_xmu = (c_last_grad * (c_last_input -
                           running_mean)).view(-1, num_features).mean(0)
            if torch.distributed.is_initialized():
                # Average the per-rank statistics: SUM-all-reduce then divide by
                # the world size (no AVG op guaranteed across backends).
                torch.distributed.all_reduce(
                    mean_dy, ReduceOp.SUM, process_group)
                mean_dy = mean_dy / world_size
                torch.distributed.all_reduce(
                    mean_dy_xmu, ReduceOp.SUM, process_group)
                mean_dy_xmu = mean_dy_xmu / world_size
            c_last_grad_input = (c_last_grad - mean_dy - (c_last_input - running_mean) / (
                running_variance + eps) * mean_dy_xmu) / torch.sqrt(running_variance + eps)
            if weight is not None:
                c_last_grad_input.mul_(weight)
            grad_input = c_last_grad_input.transpose(1, -1).contiguous()
        # calculate grad_weight
        grad_weight = None
        if weight is not None and ctx.needs_input_grad[1]:
            # dgamma = np.sum((h - mu) * (var + eps)**(-1. / 2.) * dy, axis=0)
            grad_weight = ((c_last_input - running_mean) / torch.sqrt(
                running_variance + eps) * c_last_grad).view(-1, num_features).sum(0)
        # calculate grad_bias
        grad_bias = None
        if bias is not None and ctx.needs_input_grad[2]:
            # dbeta = np.sum(dy, axis=0)
            grad_bias = c_grad.sum(0)
        torch.cuda.nvtx.range_pop()
        # One gradient slot per forward() argument; non-tensor args get None.
        return grad_input, grad_weight, grad_bias, None, None, None, None, None
|
apex-master
|
apex/parallel/sync_batchnorm_kernel.py
|
import torch
# Pick the reduce-op namespace compatible with the installed torch version:
# modern torch exposes torch.distributed.ReduceOp, older releases reduce_op,
# and very old ones only the deprecated namespace.
if hasattr(torch.distributed, 'ReduceOp'):
    ReduceOp = torch.distributed.ReduceOp
elif hasattr(torch.distributed, 'reduce_op'):
    ReduceOp = torch.distributed.reduce_op
else:
    ReduceOp = torch.distributed.deprecated.reduce_op
from .distributed import DistributedDataParallel, Reducer
# This is tricky because I'd like SyncBatchNorm to be exposed the same way
# for both the cuda-enabled and python-fallback versions, and I don't want
# to suppress the error information.
try:
    import syncbn
    from .optimized_sync_batchnorm import SyncBatchNorm
except ImportError as err:
    # CUDA extension unavailable: fall back to the pure-Python implementation
    # and stash the import error so the fallback can surface it in its warning.
    from .sync_batchnorm import SyncBatchNorm
    SyncBatchNorm.syncbn_import_error = err
def convert_syncbn_model(module, process_group=None, channel_last=False):
    '''
    Recursively traverse module and its children to replace all instances of
    ``torch.nn.modules.batchnorm._BatchNorm`` with :class:`apex.parallel.SyncBatchNorm`.
    All ``torch.nn.BatchNorm*N*d`` wrap around
    ``torch.nn.modules.batchnorm._BatchNorm``, so this function lets you easily switch
    to use sync BN.
    Args:
        module (torch.nn.Module): input module
    Example::
        >>> # model is an instance of torch.nn.Module
        >>> import apex
        >>> sync_bn_model = apex.parallel.convert_syncbn_model(model)
    '''
    from apex import deprecated_warning
    deprecated_warning("apex.parallel.convert_syncbn_model is deprecated and will be removed by the end of February 2023. Use `torch.nn.SyncBatchNorm.convert_sync_batchnorm`.")
    # InstanceNorm subclasses _BatchNorm but must NOT be converted — bail early.
    if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
        return module
    converted = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        converted = SyncBatchNorm(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group, channel_last=channel_last)
        # Carry over the tracked statistics and (if affine) the learned parameters.
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        converted.num_batches_tracked = module.num_batches_tracked
        if module.affine:
            converted.weight.data = module.weight.data.clone().detach()
            converted.bias.data = module.bias.data.clone().detach()
    # Recurse into children; re-attach each (possibly converted) child.
    for name, child in module.named_children():
        converted.add_module(
            name,
            convert_syncbn_model(child, process_group=process_group, channel_last=channel_last),
        )
    # TODO(jie) should I delete model explicitly?
    del module
    return converted
def create_syncbn_process_group(group_size):
    '''
    Creates process groups to be used for syncbn of a give ``group_size`` and returns
    process group that current GPU participates in.
    ``group_size`` must divide the total number of GPUs (world_size).
    ``group_size`` of 0 would be considered as =world_size. In this case ``None`` will be returned.
    ``group_size`` of 1 would be equivalent to using non-sync bn, but will still carry the overhead.
    Args:
        group_size (int): number of GPU's to collaborate for sync bn
    Example::
        >>> # model is an instance of torch.nn.Module
        >>> import apex
        >>> group = apex.parallel.create_syncbn_process_group(group_size)
    '''
    # group_size == 0 means "use the whole world", i.e. the default group.
    if group_size == 0:
        return None
    world_size = torch.distributed.get_world_size()
    assert world_size >= group_size
    assert world_size % group_size == 0
    my_rank = torch.distributed.get_rank()
    my_group = None
    # Every rank must take part in creating EVERY subgroup (a collective
    # requirement of torch.distributed.new_group) — so no early return here.
    for group_num in range(world_size // group_size):
        member_ranks = range(group_num * group_size, (group_num + 1) * group_size)
        cur_group = torch.distributed.new_group(ranks=member_ranks)
        if my_rank // group_size == group_num:
            my_group = cur_group
    assert my_group is not None
    return my_group
|
apex-master
|
apex/parallel/__init__.py
|
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn import functional as F
from .sync_batchnorm_kernel import SyncBatchnormFunction
from apex.parallel import ReduceOp
class SyncBatchNorm(_BatchNorm):
    """
    synchronized batch normalization module extended from ``torch.nn.BatchNormNd``
    with the added stats reduction across multiple processes.
    :class:`apex.parallel.SyncBatchNorm` is designed to work with
    ``DistributedDataParallel``.
    When running in training mode, the layer reduces stats across all processes
    to increase the effective batchsize for normalization layer. This is useful
    in applications where batch size is small on a given process that would
    diminish converged accuracy of the model. The model uses collective
    communication package from ``torch.distributed``.
    When running in evaluation mode, the layer falls back to
    ``torch.nn.functional.batch_norm``.
    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``True``
    Example::
        >>> sbn = apex.parallel.SyncBatchNorm(100).cuda()
        >>> inp = torch.randn(10, 100, 14, 14).cuda()
        >>> out = sbn(inp)
        >>> inp = torch.randn(3, 100, 20).cuda()
        >>> out = sbn(inp)
    """
    # Class-level flag: emit the "Python fallback" warning only once per process.
    warned = False
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False):
        from apex import deprecated_warning
        deprecated_warning("apex.parallel.SyncBatchNorm is deprecated and will be removed by the end of February 2023. Use `torch.nn.SyncBatchNorm`.")
        # channel_last requires the CUDA backend (optimized_sync_batchnorm).
        if channel_last == True:
            raise AttributeError("channel_last is not supported by primitive SyncBatchNorm implementation. Try install apex with `--cuda_ext` if channel_last is desired.")
        if not SyncBatchNorm.warned:
            # syncbn_import_error is stashed on the class by apex.parallel when
            # the CUDA extension failed to import.
            if hasattr(self, "syncbn_import_error"):
                print("Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: ", self.syncbn_import_error)
            else:
                print("Warning: using Python fallback for SyncBatchNorm")
            SyncBatchNorm.warned = True
        super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
        self.process_group = process_group
    def _specify_process_group(self, process_group):
        # Late-bind the process group used for stats reduction.
        self.process_group = process_group
    def forward(self, input):
        torch.cuda.nvtx.range_push("sync_bn_fw_with_mean_var")
        mean = None
        var = None
        cast = None
        out = None
        # casting to handle mismatch input type to layer type
        if self.running_mean is not None:
            if self.running_mean.dtype != input.dtype:
                input = input.to(self.running_mean.dtype)
                cast = input.dtype
        elif self.weight is not None:
            if self.weight.dtype != input.dtype:
                input = input.to(self.weight.dtype)
                cast = input.dtype
        if not self.training and self.track_running_stats:
            # fall back to pytorch implementation for inference
            torch.cuda.nvtx.range_pop()
            out = F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps)
        else:
            process_group = self.process_group
            world_size = 1
            if not self.process_group:
                process_group = torch.distributed.group.WORLD
            self.num_batches_tracked += 1
            # Batch statistics are computed without autograd; gradients flow
            # through SyncBatchnormFunction below instead.
            with torch.no_grad():
                channel_first_input = input.transpose(0, 1).contiguous()
                squashed_input_tensor_view = channel_first_input.view(
                    channel_first_input.size(0), -1)
                # total number of data points for each variance entry. Used to calculate unbiased variance estimate
                m = None
                local_m = float(squashed_input_tensor_view.size()[1])
                local_mean = torch.mean(squashed_input_tensor_view, 1)
                local_sqr_mean = torch.pow(
                    squashed_input_tensor_view, 2).mean(1)
                if torch.distributed.is_initialized():
                    # Cross-process average: SUM-all-reduce then divide by
                    # world size (no AVG op guaranteed across backends).
                    world_size = torch.distributed.get_world_size(process_group)
                    torch.distributed.all_reduce(
                        local_mean, ReduceOp.SUM, process_group)
                    mean = local_mean / world_size
                    torch.distributed.all_reduce(
                        local_sqr_mean, ReduceOp.SUM, process_group)
                    sqr_mean = local_sqr_mean / world_size
                    m = local_m * world_size
                else:
                    m = local_m
                    mean = local_mean
                    sqr_mean = local_sqr_mean
                # var(x) = E (( x - mean_x ) ** 2)
                #        = 1 / N * sum ( x - mean_x ) ** 2
                #        = 1 / N * sum (x**2) - mean_x**2
                var = sqr_mean - mean.pow(2)
                if self.running_mean is not None:
                    self.running_mean = self.momentum * mean + \
                        (1 - self.momentum) * self.running_mean
                if self.running_var is not None:
                    # as noted by the paper, we used unbiased variance estimate of the mini-batch
                    # Var[x] = m / (m-1) * Eb (sample_variance)
                    self.running_var = m / \
                        (m-1) * self.momentum * var + \
                        (1 - self.momentum) * self.running_var
            torch.cuda.nvtx.range_pop()
            out = SyncBatchnormFunction.apply(input, self.weight, self.bias, mean, var, self.eps, process_group, world_size)
        # NOTE(review): when no cast happened, `cast` is None and .to(None) is a
        # no-op, so the original dtype is preserved.
        return out.to(cast)
|
apex-master
|
apex/parallel/sync_batchnorm.py
|
from collections import OrderedDict
import copy
import importlib
from itertools import chain
import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
from ..multi_tensor_apply import multi_tensor_applier
# Lazily-resolved flatten/unflatten implementations: prefer the C++ extension
# (apex_C) and fall back to the pure-Python torch._utils versions.
imported_flatten_impl = False
def import_flatten_impl():
    # Resolve flatten_impl/unflatten_impl exactly once; callers guard on
    # `imported_flatten_impl` before invoking this.
    global flatten_impl, unflatten_impl, imported_flatten_impl
    try:
        import apex_C
        flatten_impl = apex_C.flatten
        unflatten_impl = apex_C.unflatten
    except ImportError:
        print("Warning: apex was installed without --cpp_ext.  Falling back to Python flatten and unflatten.")
        flatten_impl = torch._utils._flatten_dense_tensors
        unflatten_impl = torch._utils._unflatten_dense_tensors
    imported_flatten_impl = True
def flatten(bucket):
if not imported_flatten_impl:
import_flatten_impl()
return flatten_impl(bucket)
def unflatten(coalesced, bucket):
if not imported_flatten_impl:
import_flatten_impl()
return unflatten_impl(coalesced, bucket)
def apply_flat_dist_call(bucket, call, extra_args=None):
    """Coalesce *bucket* (tensors must all share one type), run the collective
    *call* on the flat tensor, then scatter the result back into the originals.

    When *call* is ``dist.all_reduce`` the summed result is divided by the
    world size so the buffers end up averaged.
    """
    coalesced = flatten(bucket)
    if extra_args is None:
        call(coalesced)
    else:
        call(coalesced, *extra_args)
    if call is dist.all_reduce:
        coalesced /= dist.get_world_size()
    for buf, synced in zip(bucket, unflatten(coalesced, bucket)):
        buf.copy_(synced)
def split_half_float_double(tensors):
    """Partition *tensors* into non-empty CUDA half/float/double buckets,
    returned in that fixed order; tensors of any other type are dropped."""
    buckets = []
    for dtype in ("torch.cuda.HalfTensor", "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor"):
        matching = [t for t in tensors if t.type() == dtype]
        if matching:
            buckets.append(matching)
    return buckets
def split_by_type(tensors):
    """Group *tensors* into an OrderedDict keyed by their ``type()`` string,
    preserving first-seen key order and per-key tensor order."""
    buckets = OrderedDict()
    for tensor in tensors:
        buckets.setdefault(tensor.type(), []).append(tensor)
    return buckets
def flat_dist_call(tensors, call, extra_args=None):
    """Organize *tensors* by type, then apply the collective *call* to each
    coalesced same-type bucket via :func:`apply_flat_dist_call`."""
    for bucket in split_by_type(tensors).values():
        apply_flat_dist_call(bucket, call, extra_args)
def extract_tensors(maybe_tensor, tensor_list):
    """Recursively append every tensor found in *maybe_tensor* to *tensor_list*.

    *maybe_tensor* may be a tensor, an arbitrarily nested iterable of tensors,
    or any other object (silently ignored). Mutates *tensor_list* in place and
    returns None.
    """
    if torch.is_tensor(maybe_tensor):
        tensor_list.append(maybe_tensor)
        return
    # Fix: str/bytes are iterables whose elements are themselves iterable
    # strings, so the original recursion never terminated on them.
    if isinstance(maybe_tensor, (str, bytes)):
        return
    try:
        iterator = iter(maybe_tensor)
    except TypeError:
        # Non-iterable leaf that isn't a tensor: ignore it.
        return
    for item in iterator:
        extract_tensors(item, tensor_list)
class Reducer(object):
    """
    :class:`apex.parallel.Reducer` is a simple helper for allreducing a module's
    parameters across processes. Unlike :class:`DistributedDataParallel`, it does
    not hook ``backward()``: the user decides when to call
    ``<reducer_instance>.reduce()``, which makes it possible, for example, to
    allreduce only every few iterations instead of every single one.
    Like :class:`DistributedDataParallel`, :class:`Reducer` averages the tensors
    it allreduces over the number of participating processes.
    :class:`Reducer` is designed to work with the upstream launch utility script
    ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``.
    When used with this launcher, :class:`Reducer` assumes 1:1 mapping of processes to GPUs.
    It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model.
    Args:
        module_or_grads_list: Either a network definition (module) being run in multi-gpu/distributed mode, or an iterable of gradients to be reduced. If a module is passed in, the Reducer constructor will sync the parameters across processes (broadcasting from rank 0) to make sure they're all initialized with the same values. If a list of gradients (that came from some module) is passed in, the user is responsible for manually syncing that module's parameters at the beginning of training.
    """
    def __init__(self, module_or_grads_list):
        # A Module gets its parameters broadcast from rank 0 right away;
        # anything else is treated as a (possibly nested) list of gradients.
        if isinstance(module_or_grads_list, Module):
            self.module = module_or_grads_list
            params = [param.data for param in self.module.parameters()]
            flat_dist_call(params, dist.broadcast, (0,))
        else:
            self.module = None
            self.grads = []
            extract_tensors(module_or_grads_list, self.grads)

    def reduce(self):
        """Allreduce (and average) the tracked gradients across processes."""
        if self.module is not None:
            grads = [p.grad.data for p in self.module.parameters() if p.grad is not None]
        else:
            grads = self.grads
        flat_dist_call(grads, dist.all_reduce)
class DistributedDataParallel(Module):
    """
    :class:`apex.parallel.DistributedDataParallel` is a module wrapper that enables
    easy multiprocess distributed data parallel training, similar to ``torch.nn.parallel.DistributedDataParallel``. Parameters are broadcast across participating processes on initialization, and gradients are
    allreduced and averaged over processes during ``backward()``.
    :class:`DistributedDataParallel` is optimized for use with NCCL. It achieves high performance by
    overlapping communication with computation during ``backward()`` and bucketing smaller gradient
    transfers to reduce the total number of transfers required.
    :class:`DistributedDataParallel` is designed to work with the upstream launch utility script
    ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``.
    When used with this launcher, :class:`DistributedDataParallel` assumes 1:1 mapping of processes to GPUs.
    It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model.
    https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed shows detailed usage.
    https://github.com/NVIDIA/apex/tree/master/examples/imagenet shows another example
    that combines :class:`DistributedDataParallel` with mixed precision training.
    Args:
        module: Network definition to be run in multi-gpu/distributed mode.
        message_size (int, default=1e7): Minimum number of elements in a communication bucket.
        delay_allreduce (bool, default=False): Delay all communication to the end of the backward pass. This disables overlapping communication with computation.
        allreduce_trigger_params (list, optional, default=None): If supplied, should contain a list of parameters drawn from the model. Allreduces will be kicked off whenever one of these parameters receives its gradient (as opposed to when a bucket of size message_size is full). At the end of backward(), a cleanup allreduce to catch any remaining gradients will also be performed automatically. If allreduce_trigger_params is supplied, the message_size argument will be ignored.
        allreduce_always_fp32 (bool, default=False): Convert any FP16 gradients to FP32 before allreducing. This can improve stability for widely scaled-out runs.
        gradient_average (bool, default=True): Option to toggle whether or not DDP averages the allreduced gradients over processes. For proper scaling, the default value of True is recommended.
        gradient_predivide_factor (float, default=1.0): Allows perfoming the average of gradients over processes partially before and partially after the allreduce. Before allreduce: ``grads.mul_(1.0/gradient_predivide_factor)``. After allreduce: ``grads.mul_(gradient_predivide_factor/world size)``. This can reduce the stress on the dynamic range of FP16 allreduces for widely scaled-out runs.
    .. warning::
        If ``gradient_average=False``, the pre-allreduce division (``grads.mul_(1.0/gradient_predivide_factor)``) will still be applied, but the post-allreduce gradient averaging (``grads.mul_(gradient_predivide_factor/world size)``) will be omitted.
    """

    def __init__(self,
                 module,
                 message_size=10000000,
                 delay_allreduce=False,
                 shared_param=None,
                 allreduce_trigger_params=None,
                 retain_allreduce_buffers=False,
                 allreduce_always_fp32=False,
                 num_allreduce_streams=1,
                 allreduce_communicators=None,
                 gradient_average=True,
                 gradient_predivide_factor=1.0,
                 gradient_average_split_factor=None,
                 prof=False):
        super(DistributedDataParallel, self).__init__()
        from apex import deprecated_warning
        deprecated_warning("apex.parallel.DistributedDataParallel is deprecated and will be removed by the end of February 2023.")
        # Backward/forward compatibility around
        # https://github.com/pytorch/pytorch/commit/540ef9b1fc5506369a48491af8a285a686689b36 and
        # https://github.com/pytorch/pytorch/commit/044d00516ccd6572c0d6ab6d54587155b02a3b86
        if hasattr(dist, "get_backend"):
            self._backend = dist.get_backend()
            if hasattr(dist, "DistBackend"):
                self.backend_enum_holder = dist.DistBackend
            else:
                self.backend_enum_holder = dist.Backend
        else:
            self._backend = dist._backend
            self.backend_enum_holder = dist.dist_backend

        self.warn_on_half = True if self._backend == self.backend_enum_holder.GLOO else False

        self.prof = prof

        self.allreduce_different_streams = (num_allreduce_streams > 1)
        self.num_allreduce_streams = num_allreduce_streams
        self.allreduce_communicators = allreduce_communicators
        if self.allreduce_communicators:
            assert len(allreduce_communicators[0]) == num_allreduce_streams
            assert len(allreduce_communicators[0]) == len(allreduce_communicators[1])
            assert self.allreduce_different_streams

        if self.allreduce_different_streams and delay_allreduce:
            raise ValueError("self.allreduce_different_streams may only be used if delay_allreduce=False.")

        if shared_param is not None:
            raise ValueError("shared_param is no longer supported as an option. It was misleadingly named from the start. It turns out overlapping communication with computation should work fine with shared parameters. If you still wish to delay communication to the end of the backward pass, use delay_allreduce=True|False instead.")

        self.world_size = float(dist.get_world_size())

        self.retain_allreduce_buffers = retain_allreduce_buffers
        self.allreduce_always_fp32 = allreduce_always_fp32
        self.gradient_average = gradient_average
        self.gradient_predivide_factor = gradient_predivide_factor

        self.custom_allreduce_triggers = False
        if allreduce_trigger_params is not None:
            if delay_allreduce:
                raise ValueError("Setting allreduce_trigger_params is only valid if delay_allreduce=False.")
            self.custom_allreduce_triggers = True
            self.allreduce_trigger_params = set([id(param) for param in allreduce_trigger_params])

        self.delay_allreduce = delay_allreduce
        self.message_size = message_size

        self.main_stream = torch.cuda.current_stream()
        self.bucket_streams = []
        self.bucket_events = []

        self.module = module

        self._disable_allreduce = False

        if self._backend == self.backend_enum_holder.NCCL:
            for param in self.module.parameters():
                assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU."

        self.active_params = []

        self.param_type_to_tmp_i = {"torch.cuda.HalfTensor" : 0,
                                    "torch.cuda.FloatTensor" : 1,
                                    "torch.cuda.DoubleTensor" : 2}

        if multi_tensor_applier.available:
            # TODO: I really need to centralize the C++ backed imports
            import amp_C
            self.multi_tensor_scale = amp_C.multi_tensor_scale
            self._overflow_buf = torch.cuda.IntTensor([0])

        self.create_hooks()

        flat_dist_call([param.data for param in self.module.parameters()], dist.broadcast, (0,) )

    def __setstate__(self, state):
        """Restore pickled state and re-validate the stream/delay configuration."""
        super(DistributedDataParallel, self).__setstate__(state)
        # Fix: this check previously referenced a bare `delay_allreduce` name,
        # which is undefined in this scope and raised NameError on unpickling.
        if self.allreduce_different_streams and self.delay_allreduce:
            raise ValueError("self.allreduce_different_streams may only be used if delay_allreduce=False.")

        if self.delay_allreduce:
            self.needs_refresh = True

        self.bucket_streams = []
        self.bucket_events = []

    def __getstate__(self):
        """Return a picklable copy of __dict__ (CUDA streams/events are dropped
        for non-NCCL backends, since they cannot be pickled)."""
        attrs = copy.copy(self.__dict__)
        if self._backend != self.backend_enum_holder.NCCL:
            # Fix: the attribute keys are 'bucket_streams'/'bucket_events'; the
            # previous 'self.'-prefixed strings raised KeyError here.
            del attrs['bucket_streams']
            del attrs['bucket_events']
        return attrs

    def enable_allreduce(self):
        """Re-enable automatic gradient allreduce during backward()."""
        self._disable_allreduce = False

    def disable_allreduce(self):
        """Temporarily skip all gradient communication during backward()."""
        self._disable_allreduce = True

    # Broadcast rank 0's bucket structure across all processes, and have all processes
    # regenerate their bucket structures to match.
    def sync_bucket_structure(self):
        """Flush leftover tmp buckets, then broadcast rank 0's bucket layout so
        every process assigns params to identical (bucket_idx, bucket_loc) slots."""
        # Append leftover buckets
        for tmp_bucket in self.tmp_buckets:
            if len(tmp_bucket) > 0:
                self.active_i_buckets.append(tmp_bucket)

        self.num_buckets = len(self.active_i_buckets)
        self.bucket_sizes = [len(bucket) for bucket in self.active_i_buckets]

        info_tensor = torch.cuda.IntTensor([self.num_buckets] +
                                           self.bucket_sizes +
                                           list(chain(*self.active_i_buckets)))

        dist.broadcast(info_tensor, 0)

        info = [int(entry) for entry in info_tensor]

        self.num_buckets = info[0]
        self.bucket_sizes = info[1:self.num_buckets + 1]
        self.buckets = [[None for _ in range(self.bucket_sizes[i])]
                        for i in range(self.num_buckets)]
        # Technically, active_i_buckets' work is done. But the information is still useful to
        # keep around. Therefore, refresh active_i_buckets based on rank 0 as well.
        self.active_i_buckets = [[None for _ in range(self.bucket_sizes[i])]
                                 for i in range(self.num_buckets)]

        flattened_buckets = info[self.num_buckets + 1:]
        flat_i = 0
        for bucket_idx in range(self.num_buckets):
            for bucket_loc in range(self.bucket_sizes[bucket_idx]):
                param_i = flattened_buckets[flat_i]
                self.active_i_buckets[bucket_idx][bucket_loc] = param_i
                self.param_id_to_bucket[id(self.active_params[param_i])] = (bucket_idx, bucket_loc)
                flat_i += 1

    def create_hooks(self):
        """Register a grad-accumulator hook on every parameter that requires grad.
        The hooks either build the bucket structure on the fly (first/refresh
        iterations), or kick off overlapped bucket allreduces as grads arrive."""
        # Fallback hook that's only called at the end of backward.
        # Used if you deliberately want to delay allreduces to the end, or to refresh the
        # bucket structure that will be used to overlap communication with computation in later
        # iterations.
        def allreduce_params():
            # Bucket record refresh
            if not self.delay_allreduce:
                if self.needs_refresh:
                    self.sync_bucket_structure()
                    self.needs_refresh = False

            self.allreduce_fallback()

        def overlapping_backward_epilogue():
            for stream, event in zip(self.bucket_streams, self.bucket_events):
                stream.record_event(event)
                torch.cuda.current_stream().wait_event(event)

            # Sanity checks that all the buckets were kicked off
            if self.next_bucket != self.num_buckets:
                # Fix: the message was previously passed as two separate args to
                # RuntimeError; join it into one formatted string.
                raise RuntimeError("In epilogue, next_bucket ({}) != num_buckets ({}). "
                                   "This probably indicates some buckets were not allreduced.".format(
                                   self.next_bucket, self.num_buckets))

            for actual, expected in zip(self.buckets_ready_size, self.bucket_sizes):
                if actual != expected:
                    raise RuntimeError("Some param buckets were not allreduced.")

        self.grad_accs = []
        for param in self.module.parameters():
            if param.requires_grad:
                def wrapper(param):
                    # Grab the gradient accumulator node so the hook fires once
                    # the param's grad is fully accumulated.
                    param_tmp = param.expand_as(param)
                    grad_acc = param_tmp.grad_fn.next_functions[0][0]

                    def allreduce_hook(*unused):
                        if self.prof:
                            torch.cuda.nvtx.range_push("allreduce_hook")

                        if not self._disable_allreduce:
                            if self.delay_allreduce or self.needs_refresh:
                                # TODO: How do we want to handle multiple backward passes between
                                # each forward, e.g., backward passes with retain_graph=True?
                                # needs_refresh and callback_queued are both vulnerable states.
                                if not self.delay_allreduce and self.needs_refresh:
                                    # Use the backward pass to build the bucket structure on the fly.
                                    active_i = self.param_id_to_active_i[id(param)]

                                    # Float, half, and double tensors are grouped into buckets separately.
                                    current_type = self.param_type_to_tmp_i[param.type()]

                                    self.tmp_buckets[current_type].append(active_i)

                                    ship_tmp_bucket = False
                                    if self.custom_allreduce_triggers:
                                        if id(param) in self.allreduce_trigger_params:
                                            ship_tmp_bucket = True
                                    else:
                                        self.tmp_numels[current_type] += param.numel()
                                        if self.tmp_numels[current_type] >= self.message_size:
                                            ship_tmp_bucket = True

                                    # To consider: If custom_allreduce_triggers are in use, ship all
                                    # tmp_buckets, not just tmp_buckets[current_type].
                                    if ship_tmp_bucket:
                                        self.active_i_buckets.append(self.tmp_buckets[current_type])
                                        self.tmp_buckets[current_type] = []
                                        self.tmp_numels[current_type] = 0

                                if not self.callback_queued:
                                    Variable._execution_engine.queue_callback(allreduce_params)
                                    self.callback_queued = True
                            else:
                                if not self.callback_queued:
                                    Variable._execution_engine.queue_callback(overlapping_backward_epilogue)
                                    self.callback_queued = True

                                self.comm_ready_buckets(param)

                        if self.prof:
                            torch.cuda.nvtx.range_pop()

                    grad_acc.register_hook(allreduce_hook)
                    self.grad_accs.append(grad_acc)

                wrapper(param)

    def _stream_this_bucket(self, bucket_idx):
        """Return the CUDA stream this bucket's allreduce should run on."""
        if self.allreduce_different_streams:
            return self.bucket_streams[bucket_idx%self.num_allreduce_streams]
        else:
            return self.bucket_streams[0]

    def _event_this_bucket(self, bucket_idx):
        """Return the CUDA event used to order this bucket's allreduce."""
        if self.allreduce_different_streams:
            return self.bucket_events[bucket_idx%self.num_allreduce_streams]
        else:
            return self.bucket_events[0]

    def allreduce_bucket(self, bucket, bucket_idx, force_default_stream):
        """Flatten *bucket*, allreduce (and optionally average) it on the chosen
        stream, and copy results back unless retain_allreduce_buffers is set.
        Returns the flattened, allreduced tensor."""
        tensor = flatten(bucket)

        if force_default_stream:
            bucket_stream = self.main_stream
        else:
            bucket_stream = self._stream_this_bucket(bucket_idx)
            bucket_event = self._event_this_bucket(bucket_idx)
            torch.cuda.current_stream().record_event(bucket_event)
            bucket_stream.wait_event(bucket_event)

        with torch.cuda.stream(bucket_stream):
            # self.main_stream.wait_stream(torch.cuda.current_stream())
            # torch.cuda.synchronize()

            tensor_to_allreduce = tensor

            if self.allreduce_always_fp32:
                tensor_to_allreduce = tensor.float()

            if self.gradient_predivide_factor != 1.0:
                tensor_to_allreduce.mul_(1./self.gradient_predivide_factor)

            if self.allreduce_different_streams and not force_default_stream:
                dist.all_reduce(tensor_to_allreduce, group=self.bucket_pgs[bucket_idx%self.num_allreduce_streams])
            else:
                dist.all_reduce(tensor_to_allreduce)

            if self.gradient_average:
                tensor_to_allreduce.mul_(self.gradient_predivide_factor/self.world_size)

            if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce:
                tensor.copy_(tensor_to_allreduce)

            if not self.retain_allreduce_buffers:
                if multi_tensor_applier.available:
                    multi_tensor_applier(
                        self.multi_tensor_scale,
                        self._overflow_buf,
                        [unflatten(tensor, bucket), bucket],
                        1.0)
                else:
                    for buf, synced in zip(bucket, unflatten(tensor, bucket)):
                        buf.copy_(synced)

            # I think we actually do need this here. After allreduce_bucket returns, tensor will
            # eventually go out of scope and die, at which point it could otherwise be freed for
            # further reuse by the main stream while the allreduce/div/unflatten are underway in bucket_stream.
            tensor.record_stream(bucket_stream)

        return tensor

    def allreduce_maybe_retain(self, bucket, bucket_idx, force_default_stream=False):
        """Allreduce *bucket*; when retain_allreduce_buffers is set, keep the
        flat result and re-point each grad at its view of it."""
        allreduced = self.allreduce_bucket(bucket, bucket_idx, force_default_stream)
        if self.retain_allreduce_buffers:
            if self.allreduce_buffers[bucket_idx] is not None:
                raise RuntimeError("The backward pass is attempting to replace an already-filled "
                                   "allreduce buffer. This is almost certainly an error.")
            self.allreduce_buffers[bucket_idx] = allreduced
            for view, grad in zip(unflatten(allreduced, bucket), bucket):
                grad.data = view
            # for buf, synced in zip(bucket, unflatten(allreduced, bucket)):
            #     buf.copy_(synced)

    def allreduce_fallback(self):
        """Allreduce all currently-present grads in one pass on the default
        stream (used with delay_allreduce, or while the bucket structure is
        being rebuilt)."""
        for stream, event in zip(self.bucket_streams, self.bucket_events):
            stream.record_event(event)
            torch.cuda.current_stream().wait_event(event)

        if self.retain_allreduce_buffers:
            grads = [param.grad for param in self.module.parameters() if param.grad is not None]
        else:
            grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]

        split_buckets = split_half_float_double(grads)

        # If retain_allreduce_buffers is True and delay_allreduce is False,
        # this will only be done during the first backward pass, ignored by the
        # training script, and overwritten in the next forward pass. So it's harmless.
        if self.retain_allreduce_buffers:
            self.allreduce_buffers = [None for _ in range(len(split_buckets))]

        for i, bucket in enumerate(split_buckets):
            # (the return value is only kept internally by allreduce_maybe_retain)
            self.allreduce_maybe_retain(bucket, i, force_default_stream=True)

    def comm_ready_buckets(self, param):
        """Record *param*'s grad in its bucket slot; once a bucket is full,
        launch its allreduce, keeping launches in deterministic bucket order."""
        # Need to do this in every hook for compatibility with Ruberry's streaming backward PR.
        # self.reduction_stream.wait_stream(torch.cuda.current_stream())
        if self.prof:
            torch.cuda.nvtx.range_push("comm_ready_buckets")

        bucket_idx, bucket_loc = self.param_id_to_bucket[id(param)]

        if self.buckets[bucket_idx][bucket_loc] is not None:
            raise RuntimeError("The backward pass is attempting to replace an already-filled "
                               "bucket slot. This is almost certainly an error.")

        if self.retain_allreduce_buffers:
            self.buckets[bucket_idx][bucket_loc] = param.grad
        else:
            self.buckets[bucket_idx][bucket_loc] = param.grad.data

        self.buckets_ready_size[bucket_idx] += 1

        if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]:
            if bucket_idx == self.next_bucket:
                self.allreduce_maybe_retain(self.buckets[bucket_idx], bucket_idx)

                self.next_bucket += 1

                # Reversing upstream's logic here, because we constructed our buckets based on
                # the order things were received during backward.
                if len(self.ready_buckets_not_reduced) > 0:
                    sorted_todo = sorted(self.ready_buckets_not_reduced)
                    for i in sorted_todo:
                        # Nothing can be reduced now
                        if i > self.next_bucket:
                            break
                        elif i == self.next_bucket:
                            self.allreduce_maybe_retain(self.buckets[i], i)
                            self.ready_buckets_not_reduced.remove(i)
                            self.next_bucket += 1
                        else:
                            raise ValueError("i should always be >= next_bucket")
            else:
                self.ready_buckets_not_reduced.add(bucket_idx)

        if self.prof:
            torch.cuda.nvtx.range_pop()

    def forward(self, *inputs, **kwargs):
        """Run the wrapped module's forward, then (re)initialize the bucket
        bookkeeping used by the backward-pass allreduce hooks."""
        result = self.module(*inputs, **kwargs)

        if self.prof:
            torch.cuda.nvtx.range_push("forward pass DDP logic")

        if not self._disable_allreduce:
            if not self.delay_allreduce:
                param_list = [param for param in self.module.parameters() if param.requires_grad]

                # Conditions under which to refresh self.record
                # Forward has the authority to set needs_refresh to True, but only allreduce_params
                # in backward has the authority to set needs_refresh to False.
                # Parentheses are not necessary for correct order of operations, but make the intent clearer.
                if ((not self.active_params) or
                    (len(param_list) != len(self.active_params)) or
                    any([param1 is not param2 for param1, param2 in zip(param_list, self.active_params)])):
                    self.needs_refresh = True

                if self.needs_refresh:
                    self.active_i_buckets = []
                    self.buckets = []
                    self.tmp_buckets = [[], [], []] # [running half, float, double buckets]
                    self.tmp_numels = [0, 0, 0]
                    self.bucket_sizes = []
                    self.param_id_to_active_i = {id(param) : i for i, param in enumerate(param_list)}
                    self.param_id_to_bucket = {}
                    self.bucket_pgs = []
                    self.bucket_streams = []
                    self.bucket_events = []
                else:
                    # self.buckets = [[None for _ in range(self.bucket_sizes[i])]
                    #                 for i in range(self.num_buckets)]
                    if not self.buckets:
                        self.buckets = [[None for _ in range(self.bucket_sizes[i])]
                                        for i in range(self.num_buckets)]
                    else:
                        assert len(self.buckets) == self.num_buckets, "len(buckets) = {}, expected {}".format(
                            len(self.buckets), self.num_buckets)
                        for b, bucket in enumerate(self.buckets):
                            # Fix: the failure message previously referenced an
                            # undefined name `buckets`, raising NameError instead
                            # of the intended AssertionError text.
                            assert len(bucket) == self.bucket_sizes[b], "len(buckets[{}]) = {}, expected {})".format(
                                b, len(bucket), self.bucket_sizes[b])
                            for i in range(len(bucket)):
                                bucket[i] = None

                    if self.allreduce_communicators:
                        self.bucket_pgs = self.allreduce_communicators[0]
                        self.bucket_streams = self.allreduce_communicators[1]
                        self.bucket_events = [torch.cuda.Event(enable_timing=False,
                                                               blocking=False) for _ in range(self.num_allreduce_streams)]
                    else:
                        if self.allreduce_different_streams:
                            if not self.bucket_pgs:
                                self.bucket_pgs = [dist.new_group() for _ in range(self.num_allreduce_streams)]
                                for i, bg in enumerate(self.bucket_pgs):
                                    print("rank {} created group {} with backend {}".format(
                                          dist.get_rank(), i, dist.get_backend(bg)))
                        if self.allreduce_different_streams:
                            if not self.bucket_streams:
                                self.bucket_streams = [torch.cuda.Stream() for _ in range(self.num_allreduce_streams)]
                                self.bucket_events = [torch.cuda.Event(enable_timing=False,
                                                                       blocking=False) for _ in range(self.num_allreduce_streams)]
                        else:
                            if not self.bucket_streams:
                                self.bucket_streams = [torch.cuda.Stream()]
                                self.bucket_events = [torch.cuda.Event(enable_timing=False, blocking=False)]

                    self.buckets_ready_size = [0 for i in range(self.num_buckets)]
                    if self.retain_allreduce_buffers:
                        self.allreduce_buffers = [None for _ in range(self.num_buckets)]
                    self.next_bucket = 0
                    self.ready_buckets_not_reduced = set()

                self.active_params = param_list

            self.callback_queued = False

        if self.prof:
            torch.cuda.nvtx.range_pop()

        return result
|
apex-master
|
apex/parallel/distributed.py
|
import torch
from torch.autograd.function import Function
import syncbn
from apex.parallel import ReduceOp
class SyncBatchnormFunction(Function):
    """Autograd function for synchronized batch normalization backed by the
    `syncbn` CUDA extension; statistics are reduced across `process_group`
    whenever torch.distributed is initialized."""
    @staticmethod
    def forward(ctx, input, z, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, channel_last = False, fuse_relu = False):
        """Normalize `input`; with track_running_stats=True, compute batch
        statistics (all-gathered across processes when distributed) and update
        the running buffers in place. `z` is an extra tensor forwarded to the
        channel-last kernel (presumably a residual input fused before the
        optional ReLU — confirm against the syncbn kernel).
        """
        input = input.contiguous()
        world_size = 0
        mean = None
        var_biased = None
        inv_std = None
        var = None
        out = None
        count = None
        if track_running_stats:
            if channel_last:
                # Number of elements reduced per channel (channel dim is last).
                count = int(input.numel()/input.size(-1))
                mean, var_biased = syncbn.welford_mean_var_c_last(input)
                num_channels = input.size(-1)
            else:
                count = int(input.numel()/input.size(1))
                mean, var_biased = syncbn.welford_mean_var(input)
                num_channels = input.size(1)
            if torch.distributed.is_initialized():
                if not process_group:
                    process_group = torch.distributed.group.WORLD
                device = mean.device
                world_size = torch.distributed.get_world_size(process_group)
                # Pack [mean | biased var | count] so a single all_gather moves
                # every per-process statistic at once.
                count_t = torch.empty(1, dtype=mean.dtype, device=mean.device).fill_(count)
                combined = torch.cat([mean.view(-1), var_biased.view(-1), count_t], dim=0)
                combined_list = [torch.empty_like(combined) for k in range(world_size)]
                torch.distributed.all_gather(combined_list, combined, process_group)
                combined = torch.stack(combined_list, dim=0)
                # NOTE(review): despite its name, `invstd_all` holds the gathered
                # *biased variances* (that is what was packed above) — confirm the
                # welford_parallel kernel expects variances here.
                mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
                count_all = count_all.view(-1)
                mean, var, inv_std = syncbn.welford_parallel(mean_all, invstd_all, count_all.to(torch.int32), eps)
            else:
                device = mean.device
                count_all = torch.cuda.IntTensor([count], device=device)
                inv_std = 1.0 / torch.sqrt(var_biased + eps)
                # Bessel-corrected (unbiased) variance for the running-stat update.
                var = var_biased * (count) / (count-1)
            if count == 1 and world_size < 2:
                raise ValueError('Expected more than 1 value per channel when training, got input size{}'.format(input.size()))
            # Cast the increments when the running buffers are kept in fp16.
            r_m_inc = mean if running_mean.dtype != torch.float16 else mean.half()
            r_v_inc = var if running_variance.dtype != torch.float16 else var.half()
            running_mean.data = running_mean.data * (1-momentum) + momentum*r_m_inc
            running_variance.data = running_variance.data * (1-momentum) + momentum*r_v_inc
        else:
            # Eval path: normalize with the stored running statistics.
            mean = running_mean.data
            inv_std = 1.0 / torch.sqrt(running_variance.data + eps)
        # NOTE(review): `count_all` is only assigned inside the
        # track_running_stats branch above; on the eval path this line appears
        # to raise NameError — confirm whether eval ever reaches this Function.
        ctx.save_for_backward(input, weight, mean, inv_std, z, bias, count_all.to(torch.int32))
        ctx.process_group = process_group
        ctx.channel_last = channel_last
        ctx.world_size = world_size
        ctx.fuse_relu = fuse_relu
        if channel_last:
            out = syncbn.batchnorm_forward_c_last(input, z, mean, inv_std, weight, bias, fuse_relu)
        else:
            out = syncbn.batchnorm_forward(input, mean, inv_std, weight, bias)
        return out
    @staticmethod
    def backward(ctx, grad_output):
        """Backward pass: all-reduces sum_dy / sum_dy_xmu so every rank computes
        grad_input from the global statistics. Returns one gradient slot per
        forward argument (None for non-tensor args)."""
        grad_output = grad_output.contiguous()
        # mini batch mean & var are calculated by forward path.
        # mu = 1./N*np.sum(h, axis = 0)
        # var = 1./N*np.sum((h-mu)**2, axis = 0)
        saved_input, weight, mean, inv_std, z, bias, count = ctx.saved_tensors
        process_group = ctx.process_group
        channel_last = ctx.channel_last
        world_size = ctx.world_size
        fuse_relu = ctx.fuse_relu
        grad_input = grad_z = grad_weight = grad_bias = None
        if fuse_relu:
            # Undo the fused ReLU before computing batchnorm gradients.
            grad_output = syncbn.relu_bw_c_last(grad_output, saved_input, z, mean, inv_std, weight, bias)
        if isinstance(z, torch.Tensor) and ctx.needs_input_grad[1]:
            grad_z = grad_output.clone()
        # TODO: update kernel to not pre_divide by item_num
        if channel_last:
            sum_dy, sum_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn_c_last(grad_output, saved_input, mean, inv_std, weight)
        else:
            sum_dy, sum_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn(grad_output, saved_input, mean, inv_std, weight)
        # calculate grad_input
        if ctx.needs_input_grad[0]:
            if torch.distributed.is_initialized():
                num_channels = sum_dy.shape[0]
                # One concatenated all_reduce instead of two separate calls.
                combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
                sum_dy, sum_dy_xmu = torch.split(combined, num_channels)
            if channel_last:
                grad_input = syncbn.batchnorm_backward_c_last(grad_output, saved_input, mean, inv_std, weight, sum_dy, sum_dy_xmu, count)
            else:
                grad_input = syncbn.batchnorm_backward(grad_output, saved_input, mean, inv_std, weight, sum_dy, sum_dy_xmu, count)
        # Respect needs_input_grad even though the kernels always produce these.
        if weight is None or not ctx.needs_input_grad[2]:
            grad_weight = None
        if weight is None or not ctx.needs_input_grad[3]:
            grad_bias = None
        return grad_input, grad_z, grad_weight, grad_bias, None, None, None, None, None, None, None, None
|
apex-master
|
apex/parallel/optimized_sync_batchnorm_kernel.py
|
import torch
from torch import nn
from torch.nn.parameter import Parameter
class LARC(object):
    """
    :class:`LARC` is a pytorch implementation of both the scaling and clipping
    variants of LARC: the ratio between gradient and parameter magnitudes is
    used to compute an adaptive local learning rate for each individual
    parameter, which improves convergence for large-batch training.
    See https://arxiv.org/abs/1708.03888 for the local learning rate formula.
    In practice the gradients are rescaled as a proxy for rescaling the learning
    rate, which lets :class:`LARC` wrap any torch.optim Optimizer.
    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    ```
    It can even be used in conjunction with apex.fp16_utils.FP16_optimizer.
    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    optim = apex.fp16_utils.FP16_Optimizer(optim)
    ```
    Args:
        optimizer: Pytorch optimizer to wrap and modify learning rate for.
        trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888
        clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.
        eps: epsilon kludge to help with numerical stability while calculating adaptive_lr
    """

    def __init__(self, optimizer, trust_coefficient=0.02, clip=True, eps=1e-8):
        self.optim = optimizer
        self.trust_coefficient = trust_coefficient
        self.eps = eps
        self.clip = clip

    # --- thin delegation to the wrapped optimizer -------------------------
    def __getstate__(self):
        return self.optim.__getstate__()

    def __setstate__(self, state):
        self.optim.__setstate__(state)

    @property
    def state(self):
        return self.optim.state

    def __repr__(self):
        return self.optim.__repr__()

    @property
    def param_groups(self):
        return self.optim.param_groups

    @param_groups.setter
    def param_groups(self, value):
        self.optim.param_groups = value

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    def step(self):
        """Rescale each parameter's gradient by its LARC local learning rate,
        then delegate the actual update to the wrapped optimizer."""
        with torch.no_grad():
            saved_weight_decays = []
            for group in self.optim.param_groups:
                # Absorb weight-decay control from the wrapped optimizer; it is
                # folded into the gradient below instead.
                wd = group.get('weight_decay', 0)
                saved_weight_decays.append(wd)
                group['weight_decay'] = 0
                for p in group['params']:
                    if p.grad is None:
                        continue
                    p_norm = torch.norm(p.data)
                    g_norm = torch.norm(p.grad.data)
                    if p_norm != 0 and g_norm != 0:
                        # Local lr per https://arxiv.org/abs/1708.03888,
                        # including the weight-decay term.
                        local_lr = self.trust_coefficient * p_norm / (g_norm + p_norm * wd + self.eps)
                        if self.clip:
                            # Rescale so that lr * local_lr == min(local_lr, lr).
                            local_lr = min(local_lr / group['lr'], 1)
                        p.grad.data += wd * p.data
                        p.grad.data *= local_lr
        self.optim.step()
        # Hand weight-decay control back to the wrapped optimizer.
        for wd, group in zip(saved_weight_decays, self.optim.param_groups):
            group['weight_decay'] = wd
|
apex-master
|
apex/parallel/LARC.py
|
import torch
import sys
import subprocess
def docstring_hack():
    """
    Multiproc file which will launch a set of processes locally for multi-gpu
    usage: python -m apex.parallel.multiproc main.py ...
    """
    return None
# Launch one worker process per GPU, forwarding this script's arguments and
# injecting/overwriting --world-size and --rank for each worker.
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()

if '--world-size' in argslist:
    world_size = int(argslist[argslist.index('--world-size')+1])
else:
    argslist.append('--world-size')
    argslist.append(str(world_size))

workers = []
log_files = []  # fix: keep handles so the per-GPU logs can be closed afterwards

for i in range(world_size):
    if '--rank' in argslist:
        argslist[argslist.index('--rank')+1] = str(i)
    else:
        argslist.append('--rank')
        argslist.append(str(i))
    # Rank 0 inherits this process's stdout; other ranks log to GPU_<i>.log.
    if i == 0:
        stdout = None
    else:
        stdout = open("GPU_"+str(i)+".log", "w")
        log_files.append(stdout)
    print(argslist)
    p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
    workers.append(p)

for p in workers:
    p.wait()

# Fix: the log file handles were previously leaked (never closed).
for f in log_files:
    f.close()
|
apex-master
|
apex/parallel/multiproc.py
|
import importlib
import numbers
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn import functional as F
from apex._autocast_utils import _cast_if_autocast_enabled
# Module-level cache for the lazily imported "fused_layer_norm_cuda" extension;
# populated on first use by the autograd Functions below.
# NOTE: `global` at module scope is a no-op; kept as-is for byte-compatibility.
global fused_layer_norm_cuda
fused_layer_norm_cuda = None
# Reference implementation from Huggingface
def manual_rms_norm(input, normalized_shape, weight, eps):
    """RMS-normalize *input* over its trailing ``len(normalized_shape)`` dims.

    The mean of squares is computed in float32 for numerical stability; when a
    half/bfloat16 *weight* is given, the normalized tensor is cast to the
    weight's dtype before scaling. With ``weight=None`` the normalized tensor
    is returned unscaled.
    """
    reduce_dims = tuple(range(-len(normalized_shape), 0))
    variance = input.to(torch.float32).pow(2).mean(reduce_dims, keepdim=True)
    normed = input * torch.rsqrt(variance + eps)
    if weight is None:
        return normed
    # Convert back to reduced precision when the weight requires it.
    if weight.dtype in [torch.float16, torch.bfloat16]:
        normed = normed.to(weight.dtype)
    return weight * normed
class FusedLayerNormAffineFunction(torch.autograd.Function):
    """Autograd wrapper for the fused CUDA layer norm with weight and bias."""
    @staticmethod
    def forward(ctx, input, weight, bias, normalized_shape, eps):
        # Import the CUDA extension lazily so this module can be imported on
        # machines where the extension is not built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires contiguous tensors.
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        bias_ = bias.contiguous()
        output, mean, invvar = fused_layer_norm_cuda.forward_affine(
            input_, ctx.normalized_shape, weight_, bias_, ctx.eps
        )
        # Save inputs plus the forward statistics so backward avoids recomputing them.
        ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input_, weight_, bias_, mean, invvar = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        grad_input, grad_weight, grad_bias = fused_layer_norm_cuda.backward_affine(
            grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps
        )
        # None gradients correspond to the non-tensor args (normalized_shape, eps).
        return grad_input, grad_weight, grad_bias, None, None
class FusedRMSNormAffineFunction(torch.autograd.Function):
    """Autograd wrapper for the fused CUDA RMS norm with a learnable weight."""
    @staticmethod
    def forward(ctx, input, weight, normalized_shape, eps):
        # Lazy import: keeps module importable without the extension built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires contiguous tensors.
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        output, invvar = fused_layer_norm_cuda.rms_forward_affine(
            input_, ctx.normalized_shape, weight_, ctx.eps)
        # invvar is saved so backward does not need to recompute the statistics.
        ctx.save_for_backward(input_, weight_, invvar)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input_, weight_, invvar = ctx.saved_tensors
        grad_input = grad_weight = None
        grad_input, grad_weight = fused_layer_norm_cuda.rms_backward_affine(
            grad_output.contiguous(), invvar, input_, ctx.normalized_shape, weight_, ctx.eps
        )
        # None gradients correspond to the non-tensor args (normalized_shape, eps).
        return grad_input, grad_weight, None, None
class FusedLayerNormAffineMixedDtypesFunction(FusedLayerNormAffineFunction):
    """Like FusedLayerNormAffineFunction but calls the mixed-dtype forward
    kernel; the backward pass is inherited from the parent class."""
    @staticmethod
    def forward(ctx, input, weight, bias, normalized_shape, eps):
        # Lazy import: keeps module importable without the extension built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires contiguous tensors.
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        bias_ = bias.contiguous()
        output, mean, invvar = fused_layer_norm_cuda.forward_affine_mixed_dtypes(
            input_, ctx.normalized_shape, weight_, bias_, ctx.eps
        )
        # Saved tensors mirror the parent's layout so the inherited backward works.
        ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
        return output
class FusedRMSNormAffineMixedDtypesFunction(FusedRMSNormAffineFunction):
    """Like FusedRMSNormAffineFunction but calls the mixed-dtype forward
    kernel; the backward pass is inherited from the parent class."""
    @staticmethod
    def forward(ctx, input, weight, normalized_shape, eps):
        # Lazy import: keeps module importable without the extension built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires contiguous tensors.
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        output, invvar = fused_layer_norm_cuda.rms_forward_affine_mixed_dtypes(
            input_, ctx.normalized_shape, weight_, ctx.eps
        )
        # Saved tensors mirror the parent's layout so the inherited backward works.
        ctx.save_for_backward(input_, weight_, invvar)
        return output
class FusedLayerNormFunction(torch.autograd.Function):
    """Autograd wrapper for the fused CUDA layer norm without affine params."""
    @staticmethod
    def forward(ctx, input, normalized_shape, eps):
        # Lazy import: keeps module importable without the extension built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires a contiguous tensor.
        input_ = input.contiguous()
        output, mean, invvar = fused_layer_norm_cuda.forward(input_, ctx.normalized_shape, ctx.eps)
        # Save input plus forward statistics for reuse in backward.
        ctx.save_for_backward(input_, mean, invvar)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input_, mean, invvar = ctx.saved_tensors
        grad_input = None
        grad_input = fused_layer_norm_cuda.backward(
            grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, ctx.eps
        )
        # None gradients correspond to the non-tensor args (normalized_shape, eps).
        return grad_input, None, None
class FusedRMSNormFunction(torch.autograd.Function):
    """Autograd wrapper for the fused CUDA RMS norm without a weight."""
    @staticmethod
    def forward(ctx, input, normalized_shape, eps):
        # Lazy import: keeps module importable without the extension built.
        global fused_layer_norm_cuda
        if fused_layer_norm_cuda is None:
            fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The kernel requires a contiguous tensor.
        input_ = input.contiguous()
        output, invvar = fused_layer_norm_cuda.rms_forward(input_, ctx.normalized_shape, ctx.eps)
        # invvar is saved so backward does not need to recompute the statistics.
        ctx.save_for_backward(input_, invvar)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input_, invvar = ctx.saved_tensors
        grad_input = None
        grad_input = fused_layer_norm_cuda.rms_backward(
            grad_output.contiguous(), invvar, input_, ctx.normalized_shape, ctx.eps
        )
        # None gradients correspond to the non-tensor args (normalized_shape, eps).
        return grad_input, None, None
def fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
    """Layer norm with learnable weight/bias via the fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedLayerNormAffineFunction.apply(*args)
def fused_layer_norm(input, normalized_shape, eps=1e-6):
    """Layer norm without affine parameters via the fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedLayerNormFunction.apply(*args)
def mixed_dtype_fused_layer_norm_affine(input, weight, bias, normalized_shape, eps=1e-6):
    """Affine layer norm via the mixed-dtype fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, weight, bias, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedLayerNormAffineMixedDtypesFunction.apply(*args)
def fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
    """RMS norm with a learnable weight via the fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedRMSNormAffineFunction.apply(*args)
def fused_rms_norm(input, normalized_shape, eps=1e-6):
    """RMS norm without a weight via the fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedRMSNormFunction.apply(*args)
def mixed_dtype_fused_rms_norm_affine(input, weight, normalized_shape, eps=1e-6):
    """Affine RMS norm via the mixed-dtype fused CUDA kernel."""
    # Resolve autocast casts up front, then disable autocast so the custom
    # autograd Function runs with the final dtypes.
    args = _cast_if_autocast_enabled(input, weight, normalized_shape, eps)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedRMSNormAffineMixedDtypesFunction.apply(*args)
class FusedLayerNorm(torch.nn.Module):
    r"""Applies Layer Normalization over a mini-batch of inputs as described in
    the paper `Layer Normalization`_ .
    Currently only runs on cuda() tensors.
    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
    The mean and standard-deviation are calculated separately over the last
    certain number dimensions which have to be of the shape specified by
    :attr:`normalized_shape`.
    :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
    :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
    .. note::
        Unlike Batch Normalization and Instance Normalization, which applies
        scalar scale and bias for each entire channel/plane with the
        :attr:`affine` option, Layer Normalization applies per-element scale and
        bias with :attr:`elementwise_affine`.
    This layer uses statistics computed from input data in both training and
    evaluation modes.
    Args:
        normalized_shape (int or list or torch.Size): input shape from an expected input
            of size
            .. math::
                [* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1]
                    \times \ldots \times \text{normalized}\_\text{shape}[-1]]
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine: a boolean value that when set to ``True``, this module
            has learnable per-element affine parameters initialized to ones (for weights)
            and zeros (for biases). Default: ``True``.
    Shape:
        - Input: :math:`(N, *)`
        - Output: :math:`(N, *)` (same shape as input)
    Examples::
        >>> input = torch.randn(20, 5, 10, 10)
        >>> # With Learnable Parameters
        >>> m = apex.normalization.FusedLayerNorm(input.size()[1:])
        >>> # Without Learnable Parameters
        >>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False)
        >>> # Normalize over last two dimensions
        >>> m = apex.normalization.FusedLayerNorm([10, 10])
        >>> # Normalize over last dimension of size 10
        >>> m = apex.normalization.FusedLayerNorm(10)
        >>> # Activating the module
        >>> output = m(input)
    .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
    """
    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super().__init__()
        # Import the extension eagerly so a missing build fails at construction
        # time instead of inside the first forward pass.
        global fused_layer_norm_cuda
        fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        # Normalize an int `normalized_shape` to a singleton tuple.
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = torch.Size(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = Parameter(torch.empty(*normalized_shape))
            self.bias = Parameter(torch.empty(*normalized_shape))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        self.reset_parameters()
    def reset_parameters(self):
        # Identity transform by default: weight = 1, bias = 0.
        if self.elementwise_affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)
    def forward(self, input):
        # Fall back to PyTorch's native layer norm when tracing/scripting or on CPU.
        if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
            return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
        if self.elementwise_affine:
            return fused_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, self.eps)
        else:
            return fused_layer_norm(input, self.normalized_shape, self.eps)
    def extra_repr(self):
        return "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)
class FusedRMSNorm(torch.nn.Module):
    r"""Applies RMS Normalization over a mini-batch of inputs
    Currently only runs on cuda() tensors.
    .. math::
        y = \frac{x}{\mathrm{RMS}[x]} * \gamma
    The root-mean-square is calculated separately over the last
    certain number dimensions which have to be of the shape specified by
    :attr:`normalized_shape`.
    :math:`\gamma` is a learnable affine transform parameter of
    :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
    `epsilon` is added to the mean-square, then the root of the sum is taken.
    .. note::
        Unlike Batch Normalization and Instance Normalization, which applies
        scalar scale and bias for each entire channel/plane with the
        :attr:`affine` option, RMS Normalization applies per-element scale
        with :attr:`elementwise_affine`.
    This layer uses statistics computed from input data in both training and
    evaluation modes.
    Args:
        normalized_shape (int or list or torch.Size): input shape from an expected input
            of size
            .. math::
                [* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1]
                    \times \ldots \times \text{normalized}\_\text{shape}[-1]]
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        elementwise_affine: a boolean value that when set to ``True``, this module
            has learnable per-element affine parameters initialized to ones (for weights)
            and zeros (for biases). Default: ``True``.
    Shape:
        - Input: :math:`(N, *)`
        - Output: :math:`(N, *)` (same shape as input)
    Examples::
        >>> input = torch.randn(20, 5, 10, 10)
        >>> # With Learnable Parameters
        >>> m = apex.normalization.FusedRMSNorm(input.size()[1:])
        >>> # Without Learnable Parameters
        >>> m = apex.normalization.FusedRMSNorm(input.size()[1:], elementwise_affine=False)
        >>> # Normalize over last two dimensions
        >>> m = apex.normalization.FusedRMSNorm([10, 10])
        >>> # Normalize over last dimension of size 10
        >>> m = apex.normalization.FusedRMSNorm(10)
        >>> # Activating the module
        >>> output = m(input)
    .. _`Root Mean Square Layer Normalization`: https://arxiv.org/pdf/1910.07467.pdf
    """
    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super().__init__()
        # Import the extension eagerly so a missing build fails at construction
        # time instead of inside the first forward pass.
        global fused_layer_norm_cuda
        fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda")
        # Normalize an int `normalized_shape` to a singleton tuple.
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = torch.Size(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = Parameter(torch.empty(*normalized_shape))
        else:
            self.register_parameter("weight", None)
        self.reset_parameters()
    def reset_parameters(self):
        # Identity scale by default: weight = 1.
        if self.elementwise_affine:
            init.ones_(self.weight)
    def forward(self, input):
        # Fall back to the pure-PyTorch reference when tracing/scripting or on CPU.
        if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
            return manual_rms_norm(input, self.normalized_shape, self.weight, self.eps)
        if self.elementwise_affine:
            return fused_rms_norm_affine(input, self.weight, self.normalized_shape, self.eps)
        else:
            return fused_rms_norm(input, self.normalized_shape, self.eps)
    def extra_repr(self):
        return "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)
# NOTE (mkozuki): Why "mixed"?
# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype
# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's dtype.
# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in "csrc/layer_norm_cuda.cpp"
class MixedFusedLayerNorm(FusedLayerNorm):
    """Layer norm whose output dtype follows the parameter dtype (see NOTE above).

    Always uses affine parameters; `elementwise_affine` is accepted in kwargs
    only to warn and reject `False`.
    """
    def __init__(self, normalized_shape, eps=1e-5, **kwargs):
        if "elementwise_affine" in kwargs:
            import warnings
            warnings.warn("MixedFusedLayerNorm does not support `elementwise_affine` argument")
            elementwise_affine = kwargs.pop("elementwise_affine")
            if not elementwise_affine:
                raise RuntimeError("MixedFusedLayerNorm does not support `elementwise_affine = False`")
        super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=True)
    def forward(self, input: torch.Tensor):
        # NOTE (mkozuki): CPU path is here mainly for unittest sake.
        if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
            return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
        return mixed_dtype_fused_layer_norm_affine(input, self.weight, self.bias, self.normalized_shape, self.eps)
# MixedFusedLayerNorm differs from FusedLayerNorm in that this layer norm uses parameter's dtype
# as output tensor's dtype while FusedLayerNorm uses input tensor's dtype for output tensor's dtype.
# See: `layer_norm_affine` and `layer_norm_affine_mixed_dtypes` in "csrc/layer_norm_cuda.cpp"
class MixedFusedRMSNorm(FusedRMSNorm):
    """RMS norm whose output dtype follows the parameter dtype (see NOTE above).

    Always uses an affine weight; `elementwise_affine` is accepted in kwargs
    only to warn and reject `False`.
    """
    def __init__(self, normalized_shape, eps=1e-5, **kwargs):
        if "elementwise_affine" in kwargs:
            import warnings
            warnings.warn("MixedFusedRMSNorm does not support `elementwise_affine` argument")
            elementwise_affine = kwargs.pop("elementwise_affine")
            if not elementwise_affine:
                raise RuntimeError("MixedFusedRMSNorm does not support `elementwise_affine = False`")
        super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=True)
    def forward(self, input: torch.Tensor):
        # NOTE (mkozuki): CPU path is here mainly for unittest sake.
        # TODO Manual RMS Norm Implementation Here
        if torch.jit.is_tracing() or torch.jit.is_scripting() or not input.is_cuda:
            return manual_rms_norm(input, self.normalized_shape, self.weight, self.eps)
        return mixed_dtype_fused_rms_norm_affine(input, self.weight, self.normalized_shape, self.eps)
|
apex-master
|
apex/normalization/fused_layer_norm.py
|
from .fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm, FusedRMSNorm, MixedFusedRMSNorm
|
apex-master
|
apex/normalization/__init__.py
|
from .fused_dense import *
|
apex-master
|
apex/fused_dense/__init__.py
|
import torch
from torch import nn
import fused_dense_cuda
from apex._autocast_utils import _cast_if_autocast_enabled
#implements fused GEMM+bias in forward pass using mlp_cuda from apex
class FusedDenseFunc(torch.autograd.Function):
    """Autograd wrapper for the fused linear (GEMM + bias) CUDA kernel."""
    @staticmethod
    def forward(ctx, input, weight, bias):
        # Only (input, weight) are saved; the backward kernel does not take bias.
        ctx.save_for_backward(input, weight)
        output = fused_dense_cuda.linear_bias_forward(input, weight, bias)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        grad_input, grad_weight, grad_bias = fused_dense_cuda.linear_bias_backward(input, weight, grad_output)
        return grad_input, grad_weight, grad_bias
class DenseNoBiasFunc(torch.autograd.Function):
    """Bias-free linear layer: y = x @ W^T, with a hand-written backward."""
    @staticmethod
    def forward(ctx, input, weight):
        ctx.save_for_backward(input, weight)
        return torch.matmul(input, weight.t())
    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        # dL/dx = dL/dy @ W ; dL/dW = (dL/dy)^T @ x
        return grad_output.mm(weight), grad_output.t().mm(input)
class FusedDenseGeluDenseFunc(torch.autograd.Function):
    """Autograd wrapper for the fused Linear -> GELU -> Linear CUDA kernel."""
    @staticmethod
    def forward(ctx, input, weight1, bias1, weight2, bias2):
        output1, output2, gelu_in = fused_dense_cuda.linear_gelu_linear_forward(input, weight1, bias1, weight2, bias2)
        # Fix: a redundant earlier ctx.save_for_backward(input, weight1, weight2)
        # call was removed — a second call overwrites the first, so only this
        # one ever took effect.
        ctx.save_for_backward(input, weight1, weight2, gelu_in, output1)
        return output2
    @staticmethod
    def backward(ctx, grad_output):
        input, weight1, weight2, gelu_in, output1 = ctx.saved_tensors
        grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2 = fused_dense_cuda.linear_gelu_linear_backward(input, gelu_in, output1, weight1, weight2, grad_output)
        return grad_input, grad_weight1, grad_bias1, grad_weight2, grad_bias2
def _fused_dense(input, weight, bias):
    """Run FusedDenseFunc with autocast casts resolved up front, then disabled."""
    args = _cast_if_autocast_enabled(input, weight, bias)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedDenseFunc.apply(*args)
def _dense_no_bias(input, weight):
    """Run DenseNoBiasFunc with autocast casts resolved up front, then disabled."""
    args = _cast_if_autocast_enabled(input, weight)
    with torch.cuda.amp.autocast(enabled=False):
        return DenseNoBiasFunc.apply(*args)
def _fused_dense_gelu_dense(input, weight1, bias1, weight2, bias2):
    """Run FusedDenseGeluDenseFunc with autocast casts resolved up front, then disabled."""
    args = _cast_if_autocast_enabled(input, weight1, bias1, weight2, bias2)
    with torch.cuda.amp.autocast(enabled=False):
        return FusedDenseGeluDenseFunc.apply(*args)
class FusedDense(nn.Module):
    """Linear layer backed by fused_dense_cuda (fused GEMM + bias add).

    NOTE(review): parameters are allocated with torch.empty and never
    initialized here (no reset_parameters, unlike nn.Linear) — callers are
    expected to initialize or load weights. Confirm this is intentional.
    """
    def __init__(self, in_features, out_features, bias=True):
        super(FusedDense, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_features))
        else:
            #assert False, "no-bias option not added yet"
            self.register_parameter('bias', None)
    def forward(self, input):
        # Dispatch on bias presence: fused GEMM+bias kernel vs plain matmul path.
        if self.bias is not None:
            return _fused_dense(input, self.weight, self.bias)
        else:
            return _dense_no_bias(input, self.weight)
class FusedDenseGeluDense(nn.Module):
    """Linear -> GELU -> Linear block backed by fused_dense_cuda.

    NOTE(review): parameters are allocated with torch.empty and never
    initialized here; callers must initialize or load weights. The `assert`
    below is stripped under `python -O` — confirm that's acceptable.
    """
    def __init__(self, in_features, intermediate_features, out_features, bias=True):
        super(FusedDenseGeluDense, self).__init__()
        assert bias == True, "DenseGeluDense module without bias is currently not supported"
        self.in_features = in_features
        self.intermediate_features = intermediate_features
        self.out_features = out_features
        self.weight1 = nn.Parameter(torch.empty(intermediate_features, in_features))
        self.bias1 = nn.Parameter(torch.empty(intermediate_features))
        self.weight2 = nn.Parameter(torch.empty(out_features, intermediate_features))
        self.bias2 = nn.Parameter(torch.empty(out_features))
    def forward(self, input):
        return _fused_dense_gelu_dense(input, self.weight1, self.bias1, self.weight2, self.bias2)
|
apex-master
|
apex/fused_dense/fused_dense.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class LayerType(enum.Enum):
    """Role of a transformer layer within the model."""
    encoder = enum.auto()  # == 1
    decoder = enum.auto()  # == 2
class AttnType(enum.Enum):
    """Kind of attention: self-attention or cross-attention."""
    self_attn = enum.auto()  # == 1
    cross_attn = enum.auto()  # == 2
class AttnMaskType(enum.Enum):
    """Kind of attention mask: padding mask or causal (autoregressive) mask."""
    padding = enum.auto()  # == 1
    causal = enum.auto()  # == 2
class ModelType(enum.Enum):
    """Overall model architecture: single stack, or encoder plus decoder."""
    encoder_or_decoder = enum.auto()  # == 1
    encoder_and_decoder = enum.auto()  # == 2
|
apex-master
|
apex/transformer/enums.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO (mkozuki): Replace assert with RuntimeError.
# TODO (mkozuki): Sort the functions in the same order of megatron/mpu/initialize.py
"""Model and data parallel groups."""
from typing import Tuple, Optional
import warnings
import os
import torch
from apex.transformer.log_util import get_transformer_logger
from apex.transformer._ucc_util import HAS_UCC
_logger = get_transformer_logger(__name__)
# N.B. (mkozuki): Diff btwn Megatron-LM & apex parallel_state
# set(megatron_mpu_initialize_funcs) - set(apex.transformer.parallel_state) =
# {
# 'get_num_layers',
# }
# Intra-layer model parallel group that the current rank belongs to.
_TENSOR_MODEL_PARALLEL_GROUP = None
# Inter-layer model parallel group that the current rank belongs to.
_PIPELINE_MODEL_PARALLEL_GROUP = None
# Model parallel group (both intra- and pipeline) that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Embedding group.
_EMBEDDING_GROUP = None
# Position embedding group.
_POSITION_EMBEDDING_GROUP = None
# Relative position embedding group.
_ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
_DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
# Data parallel AMAX reduction group that the current rank belongs to.
_AMAX_REDUCTION_GROUP = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = None
# These values enable us to change the mpu sizes on the fly.
_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
_MPU_TENSOR_MODEL_PARALLEL_RANK = None
_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# A list of ranks that have a copy of the embedding.
_EMBEDDING_GLOBAL_RANKS = None
# A list of ranks that have a copy of the position embedding.
_POSITION_EMBEDDING_GLOBAL_RANKS = None
# A list of ranks that have a copy of the relative position embedding.
_ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = None
_DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = None
# A list of global ranks for each pipeline group to ease calculation of the source
# rank when broadcasting from the first or last pipeline stage
_PIPELINE_GLOBAL_RANKS = None
def is_unitialized():
    """Useful for code segments that may be accessed with or without mpu initialization"""
    # NOTE(review): the name is a typo of "is_uninitialized" but it is public
    # API — kept for backward compatibility.
    return _DATA_PARALLEL_GROUP is None
def set_nccl_socket_envs():
    """Force NCCL onto the Socket transport.

    Raises:
        RuntimeError: if NCCL_SOCKET_IFNAME is not set (required for sockets).
    """
    ifname = os.getenv("NCCL_SOCKET_IFNAME")
    if ifname is None:
        raise RuntimeError("NCCL_SOCKET_IFNAME was not set")
    os.environ["NCCL_NET"] = "Socket"
def set_nccl_ib_envs():
    """Force NCCL onto the InfiniBand transport."""
    os.environ.update(NCCL_NET="IB")
def init_nccl_net(group):
    """Eagerly initialize NCCL communicators for `group` via a dummy all-reduce.

    NCCL reads NCCL_NET at communicator creation, so this pins the transport
    chosen by the caller's preceding set_nccl_*_envs() call.
    """
    temp = torch.ones(1, device="cuda")
    torch.distributed.all_reduce(temp, group=group)
    torch.cuda.synchronize()
def new_nccl_socket_group(ranks):
    """Create an NCCL process group that communicates over sockets."""
    set_nccl_socket_envs()
    group = torch.distributed.new_group(ranks, backend="nccl")
    # Force communicator creation now, while NCCL_NET=Socket is in effect.
    init_nccl_net(group=group)
    return group
def new_nccl_ib_group(ranks):
    """Create an NCCL process group that communicates over InfiniBand."""
    set_nccl_ib_envs()
    group = torch.distributed.new_group(ranks, backend="nccl")
    # Force communicator creation now, while NCCL_NET=IB is in effect.
    init_nccl_net(group=group)
    return group
def new_process_group(ranks, backend):
    """
    This function creates process groups.
    In addition to simply creating the process groups, it initializes NCCL
    for hybrid IB/Socket network like in the following diagram:
                            ____________
      [GPU Node 0]---TCP---|            |---TCP---[GPU Node 2]
           |               |            |               |
           |               |            |               |
          IB               | IP Network |              IB
           |               |            |               |
           |               |            |               |
      [GPU Node 1]---TCP---|____________|---TCP---[GPU Node 3]
    If an environment variable NUM_GPUS_PER_IB_BLOCK is defined it looks up the ranks
    and determines whether the list of ranks belong to the same computational block where
    GPUs nodes are interconnected via IB type of connection or not.
    If all ranks are in the same block, the process group will use NCCL_NET=IB for
    communication, otherwise it will use NCCL_NET=Socket.
    If NCCL_NET=Socket is ever to be used, the user must set NCCL_SOCKET_IFNAME.
    Additionally, it is recommended to set NCCL_SOCKET_NTHREADS and
    NCCL_NSOCKS_PERTHREAD before running the job.
    See: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/env.html
    for more info
    The core assumption for this functionality is that the ranks are evenly divided
    into IB blocks and all these IB blocks are of the same size.
    """
    if backend is None:
        backend = "nccl"
    compute_block_size = os.getenv("NUM_GPUS_PER_IB_BLOCK")
    if backend == "nccl" and compute_block_size is not None:
        compute_block_size = int(compute_block_size)
        # Map each rank to its IB block; IB is usable only when every rank in
        # the group falls inside the same block.
        blocks = [rank // compute_block_size for rank in ranks]
        use_ib = all(block == blocks[0] for block in blocks)
        if use_ib:
            return new_nccl_ib_group(ranks)
        else:
            return new_nccl_socket_group(ranks)
    else:
        # Non-NCCL backend, or no IB block configuration: plain group creation.
        return torch.distributed.new_group(ranks, backend=backend)
def initialize_model_parallel(
    tensor_model_parallel_size_: int = 1,
    pipeline_model_parallel_size_: int = 1,
    virtual_pipeline_model_parallel_size_: Optional[int] = None,
    pipeline_model_parallel_split_rank_: Optional[int] = None,
    use_fp8_: bool = False,
    init_mpi_proc_group: bool = False,
    *,
    default_backend: Optional[str] = None,
    p2p_backend: Optional[str] = None,
) -> None:
    """
    Initialize model data parallel groups.

    Must be called after `torch.distributed.init_process_group` and before any
    of the `get_*_group()` accessors in this module. Populates the module-level
    `_*_GROUP` / `_*_GLOBAL_RANKS` globals as a side effect.

    Arguments:
        tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
        pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
        virtual_pipeline_model_parallel_size: number of virtual stages (interleaved pipeline).
        pipeline_model_parallel_split_rank: for models with both encoder and decoder, rank in pipeline with split point.
        use_fp8_: FP8 training that needs AMAX reduction across data-parallel ranks.
        init_mpi_proc_group: Create a MPI process group, which is used for UCX-based communication APIs.
    Keyword Arguments:
        default_backend: Backend of process groups except for pipeline parallel ones.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.
        p2p_backend: Backend of process groups for pipeline model parallel.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.

    .. note::
        `torch_ucc <https://github.com/facebookresearch/torch_ucc>`_ is
        necessary for "ucc" backend.

    Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
    and 8 data-parallel groups as:
        8 data_parallel groups:
            [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
        8 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
        4 pipeline model-parallel groups:
            [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    assert default_backend is None or default_backend in ("nccl", "ucc")
    assert p2p_backend is None or p2p_backend in ("nccl", "ucc")
    if "ucc" in (default_backend, p2p_backend):
        if not HAS_UCC:
            raise ImportError("UCC backend requires pytorch source build with UCC installed and enabled")
        warnings.warn("`ucc` backend support is experimental", ExperimentalWarning)
    if default_backend == "ucc":
        warnings.warn("The UCC's functionality as `default_backend` is not well verified", ExperimentalWarning)
    # Saving the NCCL_NET type for reusing it at the epilogue
    default_nccl_net = os.getenv("NCCL_NET")
    world_size: int = torch.distributed.get_world_size()
    # Clamp requested sizes to the actual world size.
    tensor_model_parallel_size: int = min(tensor_model_parallel_size_, world_size)
    pipeline_model_parallel_size: int = min(pipeline_model_parallel_size_, world_size)
    if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0:
        raise RuntimeError(
            f"`world_size` ({world_size}) is not divisible by tensor_model_parallel_size ({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})"
        )
    data_parallel_size: int = world_size // (
        tensor_model_parallel_size * pipeline_model_parallel_size
    )
    if torch.distributed.get_rank() == 0:
        _logger.info(
            "> initializing tensor model parallel with size {}".format(
                tensor_model_parallel_size
            )
        )
        _logger.info(
            "> initializing pipeline model parallel with size {}".format(
                pipeline_model_parallel_size
            )
        )
        _logger.info(
            "> initializing data parallel with size {}".format(data_parallel_size)
        )
    num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
    num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
    num_data_parallel_groups: int = world_size // data_parallel_size
    if virtual_pipeline_model_parallel_size_ is not None:
        # n.b. (eqy) This check was inherited from Megatron-LM, need to revisit
        # the root cause as we do see numerical mismatches with 2 stages and
        # the interleaved schedule
        assert pipeline_model_parallel_size_ > 2, (
            "pipeline-model-parallel size should be greater than 2 with "
            "interleaved schedule"
        )
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = (
            virtual_pipeline_model_parallel_size_
        )
    if pipeline_model_parallel_split_rank_ is not None:
        global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
        _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank_
    rank = torch.distributed.get_rank()
    # Build the data-parallel groups.
    # NOTE: every rank must execute every `new_process_group` call below in the
    # same order, even for groups it does not belong to — this is required by
    # torch.distributed group creation.
    global _DATA_PARALLEL_GROUP
    assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
    all_data_parallel_group_ranks = []
    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(tensor_model_parallel_size):
            ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
            all_data_parallel_group_ranks.append(list(ranks))
            group = new_process_group(ranks, backend=default_backend)
            if rank in ranks:
                _DATA_PARALLEL_GROUP = group
    # Build the amax-reduction groups for fp8 precision conversion.
    if use_fp8_:
        global _AMAX_REDUCTION_GROUP
        assert _AMAX_REDUCTION_GROUP is None, "amax reduction group is already initialized"
        amax_group_size: int = tensor_model_parallel_size * data_parallel_size
        num_amax_groups: int = world_size // amax_group_size
        for i in range(num_amax_groups):
            start_rank = i * amax_group_size
            end_rank = (i + 1) * amax_group_size
            ranks = range(start_rank, end_rank)
            group = torch.distributed.new_group(ranks, backend=default_backend)
            if rank in ranks:
                _AMAX_REDUCTION_GROUP = group
    # Build the model-parallel groups.
    global _MODEL_PARALLEL_GROUP
    assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
    for i in range(data_parallel_size):
        # The i-th model-parallel group is formed by taking the i-th rank out
        # of each data-parallel group.
        ranks = [
            data_parallel_group_ranks[i]
            for data_parallel_group_ranks in all_data_parallel_group_ranks
        ]
        group = new_process_group(ranks, backend=default_backend)
        if rank in ranks:
            _MODEL_PARALLEL_GROUP = group
    # Build the tensor model-parallel groups.
    global _TENSOR_MODEL_PARALLEL_GROUP
    assert (
        _TENSOR_MODEL_PARALLEL_GROUP is None
    ), "tensor model parallel group is already initialized"
    for i in range(num_tensor_model_parallel_groups):
        ranks = list(
            range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
        )
        group = new_process_group(ranks, backend=default_backend)
        if rank in ranks:
            _TENSOR_MODEL_PARALLEL_GROUP = group
    # Build the pipeline model-parallel groups and embedding groups
    # (first and last rank in each pipeline model-parallel group).
    global _PIPELINE_MODEL_PARALLEL_GROUP
    global _PIPELINE_GLOBAL_RANKS
    assert (
        _PIPELINE_MODEL_PARALLEL_GROUP is None
    ), "pipeline model parallel group is already initialized"
    global _EMBEDDING_GROUP
    global _EMBEDDING_GLOBAL_RANKS
    assert _EMBEDDING_GROUP is None, "embedding group is already initialized"
    global _POSITION_EMBEDDING_GROUP
    global _POSITION_EMBEDDING_GLOBAL_RANKS
    assert (
        _POSITION_EMBEDDING_GROUP is None
    ), "position embedding group is already initialized"
    global _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP
    global _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP
    global _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
    global _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
    assert _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP is None or \
        _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP is None, \
        'relative position embedding group is already initialized'
    for i in range(num_pipeline_model_parallel_groups):
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        group = new_process_group(ranks, backend=p2p_backend)
        if rank in ranks:
            _PIPELINE_MODEL_PARALLEL_GROUP = group
            _PIPELINE_GLOBAL_RANKS = ranks
        # Setup embedding group (to exchange gradients between
        # first and last stages).
        encoder_relative_position_embedding_ranks = None
        decoder_relative_position_embedding_ranks = None
        if len(ranks) > 1:
            embedding_ranks = [ranks[0], ranks[-1]]
            position_embedding_ranks = [ranks[0]]
            encoder_relative_position_embedding_ranks = [ranks[0]]
            decoder_relative_position_embedding_ranks = [ranks[0]]
            if pipeline_model_parallel_split_rank_ is not None:
                # Encoder stages are before the split rank, decoder stages after.
                encoder_relative_position_embedding_ranks = \
                    ranks[:pipeline_model_parallel_split_rank_]
                decoder_relative_position_embedding_ranks = \
                    ranks[pipeline_model_parallel_split_rank_:]
                if ranks[pipeline_model_parallel_split_rank_] not in embedding_ranks:
                    embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                        ranks[-1],
                    ]
                if (
                    ranks[pipeline_model_parallel_split_rank_]
                    not in position_embedding_ranks
                ):
                    position_embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                    ]
        else:
            embedding_ranks = ranks
            position_embedding_ranks = ranks
            encoder_relative_position_embedding_ranks = ranks
            decoder_relative_position_embedding_ranks = ranks
        group = new_process_group(embedding_ranks, backend=p2p_backend)
        if rank in embedding_ranks:
            _EMBEDDING_GROUP = group
        if rank in ranks:
            _EMBEDDING_GLOBAL_RANKS = embedding_ranks
        group = new_process_group(position_embedding_ranks, backend=p2p_backend)
        if rank in position_embedding_ranks:
            _POSITION_EMBEDDING_GROUP = group
        if rank in ranks:
            _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks
        if encoder_relative_position_embedding_ranks:
            group = new_process_group(encoder_relative_position_embedding_ranks, backend=p2p_backend)
        if rank in encoder_relative_position_embedding_ranks:
            _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = group
        if rank in ranks:
            _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = \
                encoder_relative_position_embedding_ranks
        if decoder_relative_position_embedding_ranks:
            group = new_process_group(decoder_relative_position_embedding_ranks, backend=p2p_backend)
        if rank in decoder_relative_position_embedding_ranks:
            _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = group
        if rank in ranks:
            _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS = \
                decoder_relative_position_embedding_ranks
    if init_mpi_proc_group:
        torch.distributed.new_group(backend='mpi')
    # Restore the NCCL_NET setting saved at the prologue (it may have been
    # changed by the socket/IB group constructors above).
    if default_nccl_net == "Socket":
        set_nccl_socket_envs()
    elif default_nccl_net == "IB":
        set_nccl_ib_envs()
    elif default_nccl_net is None:
        os.unsetenv("NCCL_NET")
    else:
        os.environ["NCCL_NET"] = default_nccl_net
def get_rank_info() -> Tuple[int, int, int, int]:
    """Return a tuple of (data, tensor, pipeline, virtual pipeline)-parallel-rank for logger.

    Returns all zeros when model parallel state has not been initialized yet,
    so this is always safe to call from logging code.
    """
    # Fix: the annotation previously said Tuple[int, int, int] although a
    # 4-tuple is returned on both paths.
    if model_parallel_is_initialized():
        return (
            get_data_parallel_rank(),
            get_tensor_model_parallel_rank(),
            get_pipeline_model_parallel_rank(),
            get_virtual_pipeline_model_parallel_rank(),
        )
    return (0, 0, 0, 0)
def model_parallel_is_initialized():
    """Check if model and data parallel groups are initialized."""
    required_groups = (
        _TENSOR_MODEL_PARALLEL_GROUP,
        _PIPELINE_MODEL_PARALLEL_GROUP,
        _DATA_PARALLEL_GROUP,
    )
    # Initialized only when every one of the three core groups exists.
    return all(group is not None for group in required_groups)
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to."""
    if _MODEL_PARALLEL_GROUP is None:
        raise AssertionError("model parallel group is not initialized")
    return _MODEL_PARALLEL_GROUP
def get_tensor_model_parallel_group():
    """Get the tensor model parallel group the caller rank belongs to."""
    if _TENSOR_MODEL_PARALLEL_GROUP is None:
        raise AssertionError("intra_layer_model parallel group is not initialized")
    return _TENSOR_MODEL_PARALLEL_GROUP
def get_pipeline_model_parallel_group():
    """Get the pipeline model parallel group the caller rank belongs to."""
    if _PIPELINE_MODEL_PARALLEL_GROUP is None:
        raise AssertionError("pipeline_model parallel group is not initialized")
    return _PIPELINE_MODEL_PARALLEL_GROUP
def get_data_parallel_group():
    """Get the data parallel group the caller rank belongs to."""
    if _DATA_PARALLEL_GROUP is None:
        raise AssertionError("data parallel group is not initialized")
    return _DATA_PARALLEL_GROUP
def get_amax_reduction_group():
    """Get the amax reduction group the caller rank belongs to."""
    # Only populated when `initialize_model_parallel` ran with `use_fp8_=True`.
    if _AMAX_REDUCTION_GROUP is None:
        raise AssertionError("AMAX reduction group is not initialized")
    return _AMAX_REDUCTION_GROUP
def get_embedding_group():
    """Get the embedding group the caller rank belongs to."""
    if _EMBEDDING_GROUP is None:
        raise AssertionError("embedding group is not initialized")
    return _EMBEDDING_GROUP
def get_position_embedding_group():
    """Get the position embedding group the caller rank belongs to."""
    if _POSITION_EMBEDDING_GROUP is None:
        raise AssertionError("position embedding group is not initialized")
    return _POSITION_EMBEDDING_GROUP
def get_encoder_relative_position_embedding_group():
    """Get the encoder relative position embedding group the caller rank belongs to."""
    if _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP is None:
        raise AssertionError('encoder relative position embedding group is not initialized')
    return _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP
def get_decoder_relative_position_embedding_group():
    """Get the decoder relative position embedding group the caller rank belongs to."""
    if _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP is None:
        raise AssertionError('decoder relative position embedding group is not initialized')
    return _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP
def is_rank_in_embedding_group(ignore_virtual=False):
    """Return true if current rank is in embedding group, False otherwise."""
    current_rank = torch.distributed.get_rank()
    if ignore_virtual:
        return current_rank in _EMBEDDING_GLOBAL_RANKS
    if current_rank not in _EMBEDDING_GLOBAL_RANKS:
        return False
    # The boundary members only count when they are on the matching
    # (first/last) virtual stage; middle members always count.
    if current_rank == _EMBEDDING_GLOBAL_RANKS[0]:
        return is_pipeline_first_stage(ignore_virtual=False)
    if current_rank == _EMBEDDING_GLOBAL_RANKS[-1]:
        return is_pipeline_last_stage(ignore_virtual=False)
    return True
def is_rank_in_position_embedding_group():
    """Return whether the current rank is in position embedding group."""
    return torch.distributed.get_rank() in _POSITION_EMBEDDING_GLOBAL_RANKS
def is_rank_in_encoder_relative_position_embedding_group():
    """Return true if current rank is in encoder relative position embedding group, False otherwise."""
    return torch.distributed.get_rank() in _ENCODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
def is_rank_in_decoder_relative_position_embedding_group():
    """Return true if current rank is in decoder relative position embedding group, False otherwise."""
    return torch.distributed.get_rank() in _DECODER_RELATIVE_POSITION_EMBEDDING_GLOBAL_RANKS
def is_pipeline_stage_before_split(rank=None):
    """Return True if pipeline stage executes encoder block for a model
    with both encoder and decoder."""
    # A single-stage pipeline trivially contains the encoder.
    if get_pipeline_model_parallel_world_size() == 1:
        return True
    stage_rank = get_pipeline_model_parallel_rank() if rank is None else rank
    split = _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
    # No split configured means the whole pipeline is treated as encoder.
    return split is None or stage_rank < split
def is_pipeline_stage_after_split(rank=None):
    """Return True if pipeline stage executes decoder block for a model
    with both encoder and decoder."""
    # A single-stage pipeline trivially contains the decoder.
    if get_pipeline_model_parallel_world_size() == 1:
        return True
    stage_rank = get_pipeline_model_parallel_rank() if rank is None else rank
    split = _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
    # No split configured means the whole pipeline is treated as decoder too.
    return split is None or stage_rank >= split
def is_pipeline_stage_at_split():
    """Return true if pipeline stage executes decoder block and next
    stage executes encoder block for a model with both encoder and
    decoder."""
    current = get_pipeline_model_parallel_rank()
    if not is_pipeline_stage_before_split(current):
        return False
    return is_pipeline_stage_after_split(current + 1)
def set_tensor_model_parallel_world_size(world_size):
    """Set the tensor model parallel size"""
    # Overrides the value otherwise derived from the process group in
    # `get_tensor_model_parallel_world_size`.
    global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
    _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size
def set_pipeline_model_parallel_world_size(world_size):
    """Set the pipeline model parallel size"""
    # Overrides the value otherwise derived from the process group in
    # `get_pipeline_model_parallel_world_size`.
    global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
    _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
def get_tensor_model_parallel_world_size():
    """Return world size for the tensor model parallel group."""
    # An explicit override (via the setter) wins over the process group size.
    if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is None:
        return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
    return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
def get_pipeline_model_parallel_world_size():
    """Return world size for the pipeline model parallel group."""
    # An explicit override (via the setter) wins over the process group size.
    if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is None:
        return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group())
    return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
def set_tensor_model_parallel_rank(rank):
    """Set tensor model parallel rank."""
    # Overrides the value otherwise derived from the process group in
    # `get_tensor_model_parallel_rank`.
    global _MPU_TENSOR_MODEL_PARALLEL_RANK
    _MPU_TENSOR_MODEL_PARALLEL_RANK = rank
def set_pipeline_model_parallel_rank(rank):
    """Set pipeline model parallel rank."""
    # Overrides the value otherwise derived from the process group in
    # `get_pipeline_model_parallel_rank`.
    global _MPU_PIPELINE_MODEL_PARALLEL_RANK
    _MPU_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_tensor_model_parallel_rank():
    """Return my rank for the tensor model parallel group."""
    # An explicit override (via the setter) wins over the process group rank.
    if _MPU_TENSOR_MODEL_PARALLEL_RANK is None:
        return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
    return _MPU_TENSOR_MODEL_PARALLEL_RANK
def get_pipeline_model_parallel_rank():
    """Return my rank for the pipeline model parallel group."""
    # An explicit override (via the setter) wins over the process group rank.
    if _MPU_PIPELINE_MODEL_PARALLEL_RANK is None:
        return torch.distributed.get_rank(group=get_pipeline_model_parallel_group())
    return _MPU_PIPELINE_MODEL_PARALLEL_RANK
# TODO (mkozuki): Add [`get_num_layers`](https://github.com/NVIDIA/Megatron-LM/blob/e156d2fea7fc5c98e645f7742eb86b643956d840/megatron/mpu/initialize.py#L321) here, maybe?
def get_pipeline_model_parallel_split_rank():
    """Return my rank for the pipeline model parallel split rank."""
    # None when the model has no encoder/decoder split configured.
    global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
    return _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
def set_pipeline_model_parallel_split_rank(pipeline_model_parallel_split_rank: int):
    """Set my rank for the pipeline model parallel split rank."""
    global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
    _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank
def is_pipeline_first_stage(ignore_virtual=False):
    """Return True if in the first pipeline model-parallel stage, False otherwise."""
    if not ignore_virtual:
        # With an interleaved schedule, only virtual stage 0 counts as first.
        virtual_world_size = get_virtual_pipeline_model_parallel_world_size()
        if virtual_world_size is not None and get_virtual_pipeline_model_parallel_rank() != 0:
            return False
    return get_pipeline_model_parallel_rank() == 0
def is_pipeline_last_stage(ignore_virtual=False):
    """Return True if in the last pipeline model-parallel stage, False otherwise."""
    if not ignore_virtual:
        # With an interleaved schedule, only the final virtual stage counts.
        virtual_world_size = get_virtual_pipeline_model_parallel_world_size()
        if virtual_world_size is not None:
            if get_virtual_pipeline_model_parallel_rank() != virtual_world_size - 1:
                return False
    return get_pipeline_model_parallel_rank() == get_pipeline_model_parallel_world_size() - 1
def get_virtual_pipeline_model_parallel_rank():
    """Return the virtual pipeline-parallel rank."""
    # None unless `initialize_model_parallel` enabled the interleaved schedule.
    global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
    return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
def set_virtual_pipeline_model_parallel_rank(rank):
    """Set the virtual pipeline-parallel rank."""
    global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
    _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank
def get_virtual_pipeline_model_parallel_world_size():
    """Return the virtual pipeline-parallel world size."""
    # None unless `initialize_model_parallel` enabled the interleaved schedule.
    global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
    return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
def set_virtual_pipeline_model_parallel_world_size(size):
    """Set the virtual pipeline-parallel world size."""
    # (Docstring fixed: this is a setter, the old docstring said "Return".)
    global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
    _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = size
def get_tensor_model_parallel_src_rank():
    """Calculate the global rank corresponding to the first local rank
    in the tensor model parallel group."""
    global_rank = torch.distributed.get_rank()
    tp_world_size = get_tensor_model_parallel_world_size()
    # Round the global rank down to the start of its TP group.
    return global_rank - (global_rank % tp_world_size)
def get_data_parallel_src_rank():
    """Calculate the global rank corresponding to the first local rank in the data parallel group."""
    num_data_parallel_groups = (
        torch.distributed.get_world_size() // get_data_parallel_world_size()
    )
    return torch.distributed.get_rank() % num_data_parallel_groups
def get_pipeline_model_parallel_first_rank():
    """Return the global rank of the first stage in this rank's pipeline group."""
    if _PIPELINE_GLOBAL_RANKS is None:
        raise AssertionError("Pipeline parallel group is not initialized")
    return _PIPELINE_GLOBAL_RANKS[0]
def get_pipeline_model_parallel_last_rank():
    """Return the global rank of the last stage in this rank's pipeline group."""
    if _PIPELINE_GLOBAL_RANKS is None:
        raise AssertionError("Pipeline parallel group is not initialized")
    return _PIPELINE_GLOBAL_RANKS[get_pipeline_model_parallel_world_size() - 1]
def get_pipeline_model_parallel_next_rank():
    """Return the global rank of the next stage in this rank's pipeline group (wraps around)."""
    if _PIPELINE_GLOBAL_RANKS is None:
        raise AssertionError("Pipeline parallel group is not initialized")
    pipeline_rank = get_pipeline_model_parallel_rank()
    pipeline_world_size = get_pipeline_model_parallel_world_size()
    return _PIPELINE_GLOBAL_RANKS[(pipeline_rank + 1) % pipeline_world_size]
def get_pipeline_model_parallel_prev_rank():
    """Return the global rank of the previous stage in this rank's pipeline group (wraps around)."""
    if _PIPELINE_GLOBAL_RANKS is None:
        raise AssertionError("Pipeline parallel group is not initialized")
    pipeline_rank = get_pipeline_model_parallel_rank()
    pipeline_world_size = get_pipeline_model_parallel_world_size()
    return _PIPELINE_GLOBAL_RANKS[(pipeline_rank - 1) % pipeline_world_size]
def get_data_parallel_world_size():
    """Return world size for the data parallel group."""
    return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank():
    """Return my rank for the data parallel group."""
    return torch.distributed.get_rank(group=get_data_parallel_group())
# note (mkozuki): `destroy_model_parallel` voids more global variables than Megatron-LM.
# Otherwise pipeline parallel forward_backward functions test hangs possibly because
# the clean-up of the original is NOT enough.
def destroy_model_parallel():
    """Set the groups to none."""
    # Reset every piece of module-level parallel state so that
    # `initialize_model_parallel` may be called again from scratch.
    global _MODEL_PARALLEL_GROUP, _TENSOR_MODEL_PARALLEL_GROUP, \
        _PIPELINE_MODEL_PARALLEL_GROUP, _DATA_PARALLEL_GROUP, \
        _AMAX_REDUCTION_GROUP, _EMBEDDING_GROUP, _POSITION_EMBEDDING_GROUP, \
        _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP, \
        _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP, \
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK, \
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE, \
        _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE, \
        _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE, \
        _MPU_TENSOR_MODEL_PARALLEL_RANK, _MPU_PIPELINE_MODEL_PARALLEL_RANK
    _MODEL_PARALLEL_GROUP = None
    _TENSOR_MODEL_PARALLEL_GROUP = None
    _PIPELINE_MODEL_PARALLEL_GROUP = None
    _DATA_PARALLEL_GROUP = None
    _AMAX_REDUCTION_GROUP = None
    _EMBEDDING_GROUP = None
    _POSITION_EMBEDDING_GROUP = None
    _ENCODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
    _DECODER_RELATIVE_POSITION_EMBEDDING_GROUP = None
    _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
    _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
    _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
    _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
    _MPU_TENSOR_MODEL_PARALLEL_RANK = None
    _MPU_PIPELINE_MODEL_PARALLEL_RANK = None
# Used to warn when the UCC is specified.
class ExperimentalWarning(Warning):
    """Warning category for experimental features (e.g. the `ucc` backend)."""
    pass
|
apex-master
|
apex/transformer/parallel_state.py
|
import logging
import os
def get_transformer_logger(name: str) -> logging.Logger:
    """Return the logger named after `name` with any file extension stripped.

    E.g. "module.py" and "module" both map to the logger named "module".
    """
    base_name, _extension = os.path.splitext(name)
    return logging.getLogger(base_name)
def set_logging_level(verbosity) -> None:
    """Change logging severity.
    Args:
        verbosity: any value accepted by `logging.Logger.setLevel`
            (e.g. `logging.INFO` or a level name string).
    """
    # NOTE(review): imported lazily — presumably to avoid a circular import
    # with the `apex` package at module load time; confirm before moving it.
    from apex import _library_root_logger
    _library_root_logger.setLevel(verbosity)
|
apex-master
|
apex/transformer/log_util.py
|
from apex.transformer import amp
from apex.transformer import functional
from apex.transformer import parallel_state
from apex.transformer import pipeline_parallel
from apex.transformer import tensor_parallel
from apex.transformer import utils
from apex.transformer.enums import LayerType
from apex.transformer.enums import AttnType
from apex.transformer.enums import AttnMaskType

# Public API of `apex.transformer`: the submodules above plus the enums
# re-exported from `apex.transformer.enums`.
__all__ = [
    "amp",
    "functional",
    "parallel_state",
    "pipeline_parallel",
    "tensor_parallel",
    "utils",
    # enums.py
    "LayerType",
    "AttnType",
    "AttnMaskType",
]
|
apex-master
|
apex/transformer/__init__.py
|
from torch import distributed as dist

# HAS_UCC: True when the UCC collective backend is usable — either built into
# this PyTorch (`dist.is_ucc_available`) or provided by the external
# `torch_ucc` package.
HAS_UCC = hasattr(dist, "is_ucc_available") and dist.is_ucc_available()
if not HAS_UCC:
    try:
        import torch_ucc
        HAS_UCC = True
    except ImportError:
        # Neither native UCC support nor torch_ucc is installed.
        HAS_UCC = False
|
apex-master
|
apex/transformer/_ucc_util.py
|
"""Utility functions used by both `pipeline_parallel` and `tensor_parallel`"""
import torch
from apex.transformer import parallel_state
# `all_gather_into_tensor` is new placeholders for `_all_gather_base`.
# It requires the most recent version of PyTorch.
# The following 4 lines are for backward comparability with
# older PyTorch.
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
def ensure_divisibility(numerator, denominator):
    """Assert that `numerator` is evenly divisible by `denominator`."""
    remainder = numerator % denominator
    assert remainder == 0, "{} is not divisible by {}".format(numerator, denominator)
def divide(numerator, denominator):
    """Ensure that numerator is divisible by the denominator and return
    the division value."""
    # Divisibility check inlined from `ensure_divisibility`.
    assert numerator % denominator == 0, "{} is not divisible by {}".format(
        numerator, denominator
    )
    return numerator // denominator
def split_tensor_into_1d_equal_chunks(tensor):
    """Break a tensor into equal 1D chunks and return this rank's chunk (a view)."""
    flat = tensor.view(-1)
    chunk_size = (
        torch.numel(flat) // parallel_state.get_tensor_model_parallel_world_size()
    )
    offset = chunk_size * parallel_state.get_tensor_model_parallel_rank()
    return flat[offset:offset + chunk_size]
def gather_split_1d_tensor(tensor):
    """Opposite of above function, gather values from model parallel ranks."""
    tp_world_size = parallel_state.get_tensor_model_parallel_world_size()
    # Flat output buffer holding every rank's chunk back to back.
    gathered = torch.empty(
        tp_world_size * torch.numel(tensor),
        dtype=tensor.dtype,
        device=torch.cuda.current_device(),
        requires_grad=False,
    )
    torch.distributed.all_gather_into_tensor(
        gathered,
        tensor,
        group=parallel_state.get_tensor_model_parallel_group(),
    )
    return gathered
|
apex-master
|
apex/transformer/utils.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron number of micro-batches calculators."""
from abc import ABC
from abc import abstractmethod
from typing import Optional, List
from apex.transformer.log_util import get_transformer_logger
_logger = get_transformer_logger(__name__)
def build_num_microbatches_calculator(
    rank: int,
    rampup_batch_size: Optional[List[int]],
    global_batch_size: int,
    micro_batch_size: int,
    data_parallel_size: int,
):
    """Construct the micro-batch calculator for this run.

    Arguments:
        rank: global rank; only rank 0 logs.
        rampup_batch_size: None for a constant global batch size, otherwise a
            3-element sequence [start batch size, batch size increment, ramp-up samples].
        global_batch_size: target (post ramp-up) global batch size.
        micro_batch_size: micro batch size.
        data_parallel_size: data parallel size.

    Returns:
        `ConstantNumMicroBatches` when `rampup_batch_size` is None, otherwise
        `RampupBatchsizeNumMicroBatches`.
    """
    # Constant num micro-batches.
    if rampup_batch_size is None:
        num_microbatches_calculator = ConstantNumMicroBatches(
            global_batch_size, micro_batch_size, data_parallel_size
        )
        if rank == 0:
            _logger.info(
                "setting number of micro-batches to constant {}".format(
                    num_microbatches_calculator.get()
                )
            )
    else:
        # Fix: error message previously misspelled "incerement".
        assert len(rampup_batch_size) == 3, (
            "expected the following "
            "format: --rampup-batch-size <start batch size> "
            "<batch size increment> <ramp-up samples>"
        )
        start_batch_size = int(rampup_batch_size[0])
        batch_size_increment = int(rampup_batch_size[1])
        ramup_samples = int(rampup_batch_size[2])
        if rank == 0:
            _logger.info(
                "will use batch size rampup starting from global batch "
                "size {} to global batch size {} with batch size increments "
                "{} over {} samples.".format(
                    start_batch_size,
                    global_batch_size,
                    batch_size_increment,
                    ramup_samples,
                )
            )
        num_microbatches_calculator = RampupBatchsizeNumMicroBatches(
            start_batch_size,
            batch_size_increment,
            ramup_samples,
            global_batch_size,
            micro_batch_size,
            data_parallel_size,
        )
    return num_microbatches_calculator
class NumMicroBatchesCalculator(ABC):
    """Abstract base for objects that track the current number of micro-batches
    and the current global batch size over the course of training."""

    def __init__(self):
        # Number of micro-batches per step; set by subclasses.
        self.num_micro_batches = None
        # Global batch size currently in effect; set by subclasses.
        self.current_global_batch_size = None

    def get(self):
        """Return the current number of micro-batches."""
        return self.num_micro_batches

    def get_current_global_batch_size(self):
        """Return the global batch size currently in effect."""
        return self.current_global_batch_size

    @abstractmethod
    def update(self, consumed_samples, consistency_check):
        """Recompute state after `consumed_samples` samples have been trained on."""
        pass
class ConstantNumMicroBatches(NumMicroBatchesCalculator):
    """Micro-batch calculator for a fixed global batch size."""

    def __init__(self, global_batch_size, micro_batch_size, data_parallel_size):
        denominator = micro_batch_size * data_parallel_size
        assert global_batch_size % denominator == 0, (
            "global batch size ({}) is not divisible by micro batch size ({})"
            " times data parallel size ({})".format(
                global_batch_size, micro_batch_size, data_parallel_size
            )
        )
        self.num_micro_batches = global_batch_size // denominator
        assert self.num_micro_batches >= 1
        self.current_global_batch_size = global_batch_size
        self.micro_batch_size = micro_batch_size

    def update(self, consumed_samples, consistency_check):
        # Constant schedule: nothing ever changes.
        pass
class RampupBatchsizeNumMicroBatches(NumMicroBatchesCalculator):
    """Micro-batch calculator that linearly ramps the global batch size up
    from `start_batch_size` to `global_batch_size` over `ramup_samples`."""

    def __init__(
        self,
        start_batch_size,
        batch_size_increment,
        ramup_samples,
        global_batch_size,
        micro_batch_size,
        data_parallel_size,
    ):
        """Batch size ramp up.
        Over
            steps = (global-batch-size - start-batch-size) / batch_size_increment
        increment batch size from start-batch-size to global-batch-size using
            rampup-samples / steps
        samples.
        Arguments:
            start_batch_size: global batch size to start with
            batch_size_increment: global batch size increments
            ramup_samples: number of samples to use ramp up global
                batch size from `start_batch_size` to `global_batch_size`
            global_batch_size: global batch size post rampup
            micro_batch_size: micro batch size
            data_parallel_size: data parallel size.
        """
        self.micro_batch_size = micro_batch_size
        self.data_parallel_size = data_parallel_size
        self.micro_batch_times_data_parallel_size = (
            self.micro_batch_size * self.data_parallel_size
        )
        assert self.micro_batch_times_data_parallel_size > 0
        assert start_batch_size > 0
        self.start_batch_size = start_batch_size
        assert global_batch_size > 0
        self.global_batch_size = global_batch_size
        diff_batch_size = self.global_batch_size - self.start_batch_size
        assert diff_batch_size >= 0
        assert batch_size_increment > 0
        self.batch_size_increment = batch_size_increment
        assert diff_batch_size % batch_size_increment == 0, (
            "expected "
            "global batch size interval ({}) to be divisible by global batch "
            "size increment ({})".format(diff_batch_size, batch_size_increment)
        )
        # NOTE(review): when start == global batch size, num_increments is 0
        # and the division below raises ZeroDivisionError — use
        # ConstantNumMicroBatches for that configuration.
        num_increments = diff_batch_size // self.batch_size_increment
        self.ramup_samples = ramup_samples
        assert self.ramup_samples >= 0
        self.rampup_samples_per_increment = self.ramup_samples / num_increments
        # Initialize number of microbatches.
        self.update(0, False)

    def update(self, consumed_samples, consistency_check):
        """Recompute `current_global_batch_size` and `num_micro_batches` for
        the given number of consumed samples.

        Arguments:
            consumed_samples: total samples trained on so far.
            consistency_check: when True, assert the current global batch size
                is divisible by micro_batch_size * data_parallel_size.
        """
        if consumed_samples > self.ramup_samples:
            # Past the ramp-up window: use the final global batch size.
            self.current_global_batch_size = self.global_batch_size
        else:
            steps = int(consumed_samples / self.rampup_samples_per_increment)
            self.current_global_batch_size = (
                self.start_batch_size + steps * self.batch_size_increment
            )
            assert self.current_global_batch_size <= self.global_batch_size
        if consistency_check:
            # Fix: the message previously concatenated to "timesdata parallel
            # size" because a space was missing between the string parts.
            assert (
                self.current_global_batch_size
                % self.micro_batch_times_data_parallel_size
                == 0
            ), (
                "current global "
                "batch size ({}) is not divisible by micro-batch-size ({}) times "
                "data parallel size ({})".format(
                    self.current_global_batch_size,
                    self.micro_batch_size,
                    self.data_parallel_size,
                )
            )
        self.num_micro_batches = (
            self.current_global_batch_size // self.micro_batch_times_data_parallel_size
        )
|
apex-master
|
apex/transformer/microbatches.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.tensor_parallel.utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
    """Cross entropy over a vocabulary dimension sharded across the
    tensor-model-parallel group.

    Each rank holds logits for one contiguous slice of the vocabulary; the
    softmax normalization and the target-logit lookup are combined via
    all-reduces so no rank ever materializes the full vocab dimension.
    """

    @staticmethod
    def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0):
        """Compute the (optionally label-smoothed) cross entropy loss.

        Args:
            vocab_parallel_logits: logits of shape [..., partition-vocab-size]
                holding only this rank's vocabulary slice.
            target: integer global vocab ids, shape matching the leading
                dims of the logits.
            label_smoothing: smoothing factor; must be in (0, 1) when > 0.

        Returns:
            Loss tensor with the same shape as `target`.
        """
        # Maximum value along vocab dimension across all GPUs
        # (for a numerically stable softmax).
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(
            logits_max, op=torch.distributed.ReduceOp.MAX, group=get_tensor_model_parallel_group()
        )
        # Subtract the maximum value.
        vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)
        # Get the partition's vocab indices.
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = get_tensor_model_parallel_rank()
        world_size = get_tensor_model_parallel_world_size()
        vocab_start_index, vocab_end_index = get_vocab_range(partition_vocab_size, rank, world_size)
        # Create a mask of valid vocab ids (1 means it needs to be masked):
        # targets outside this rank's slice are clamped to local index 0 and
        # their contribution is zeroed out below.
        target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
        masked_target = target.clone() - vocab_start_index
        masked_target[target_mask] = 0
        # Get predicted-logits = logits[target].
        # For Simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(target)
        # Zero out targets owned by other ranks; the SUM all-reduce below
        # fills in the owning rank's value.
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(
            predicted_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
        )
        # Sum of exponential of logits along vocab dimension across all GPUs.
        # NOTE: `exp_logits` aliases `vocab_parallel_logits`; the exp is
        # computed in place over the (max-shifted) logits buffer.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(
            sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=get_tensor_model_parallel_group()
        )
        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits
        # Store softmax, target-mask and masked-target for backward pass.
        # After this in-place division `exp_logits` holds the softmax.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        vocab_size = exp_logits.size(-1)
        if label_smoothing > 0:
            """
            We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth.
            = (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt})
            = (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
            = ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
            = (K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i
            = (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K
            From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py
            """
            assert 1.0 > label_smoothing > 0.0
            smoothing = label_smoothing * vocab_size / (vocab_size - 1)
            # Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs.
            log_probs = torch.log(exp_logits)
            mean_log_probs = log_probs.mean(dim=-1)
            loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs
        ctx.label_smoothing, ctx.vocab_size = label_smoothing, vocab_size
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        """Gradient w.r.t. the local logits: (softmax - one_hot), adjusted
        for label smoothing, scaled by `grad_output`. No grads for `target`
        or `label_smoothing`."""
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors
        label_smoothing, vocab_size = ctx.label_smoothing, ctx.vocab_size
        # All the inputs have softmax as their gradient.
        # NOTE: `grad_input` aliases the saved softmax and is modified in place.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)
        # Add the gradient from matching classes.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
        # 1.0 where this rank owns the target id, 0.0 elsewhere.
        softmax_update = 1.0 - target_mask.view(-1).float()
        if label_smoothing > 0:
            smoothing = label_smoothing * vocab_size / (vocab_size - 1)
            grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update
            average_grad = 1 / vocab_size
            grad_2d[arange_1d, :] -= smoothing * average_grad
        else:
            grad_2d[arange_1d, masked_target_1d] -= softmax_update
        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))
        return grad_input, None, None
def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.0):
    """Compute cross entropy over vocabulary-sharded logits.

    Thin functional wrapper around the autograd-aware implementation.
    """
    apply_args = (vocab_parallel_logits, target, label_smoothing)
    return _VocabParallelCrossEntropy.apply(*apply_args)
|
apex-master
|
apex/transformer/tensor_parallel/cross_entropy.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(mkozuki): Remove this file as Megatron-LM seems to have done so.
import torch
# A dictionary of all the memory buffers allocated.
_MEM_BUFFS = dict()
def allocate_mem_buff(name, numel, dtype, track_usage):
    """Allocate a memory buffer and register it under `name`.

    Raises AssertionError if a buffer with this name already exists.
    """
    assert name not in _MEM_BUFFS, "memory buffer {} already allocated.".format(name)
    buf = MemoryBuffer(name, numel, dtype, track_usage)
    _MEM_BUFFS[name] = buf
    return buf
def get_mem_buff(name):
    """Get the memory buffer previously registered under `name`.

    Raises KeyError if no buffer with this name was allocated.
    """
    return _MEM_BUFFS[name]
class MemoryBuffer:
    """Pre-allocated contiguous device memory used to reduce fragmentation.

    A single flat tensor of `numel` elements of type `dtype` is allocated
    once on the current CUDA device. Chunks are handed out sequentially via
    `add`; calling `reset` rewinds the allocation cursor (`_start`) so the
    buffer can be reused.
    """

    def __init__(self, name, numel, dtype, track_usage):
        if torch.distributed.get_rank() == 0:
            bytes_per_element = torch.tensor([], dtype=dtype).element_size()
            megabytes = numel * bytes_per_element / 1024 / 1024
            print(
                "> building the {} memory buffer with {} num elements "
                "and {} dtype ({:.1f} MB)...".format(name, numel, dtype, megabytes),
                flush=True,
            )
        self.name = name
        self.numel = numel
        self.dtype = dtype
        # One big flat allocation; chunks handed out by `add` are views into it.
        self.data = torch.empty(
            self.numel,
            dtype=self.dtype,
            device=torch.cuda.current_device(),
            requires_grad=False,
        )
        # Cursor pointing at the first free element.
        self._start = 0
        # Optional usage accounting (see `print_average_usage`).
        self.track_usage = track_usage
        if self.track_usage:
            self.in_use_value = 0.0
            self.total_value = 0.0

    def reset(self):
        """Rewind the allocation cursor to the beginning of the buffer."""
        self._start = 0

    def is_in_use(self):
        """Return True when any chunk of the buffer has been handed out."""
        return self._start > 0

    def numel_in_use(self):
        """Return how many elements are currently handed out."""
        return self._start

    def add(self, tensor):
        """Copy `tensor` into the next free chunk and return a view onto it."""
        assert (
            tensor.dtype == self.dtype
        ), "Input tensor type {} different from buffer type {}".format(
            tensor.dtype, self.dtype
        )
        requested = torch.numel(tensor)
        end = self._start + requested
        assert (
            end <= self.numel
        ), "Not enough memory left in the buffer ({} > {})".format(
            requested, self.numel - self._start
        )
        # Carve out a view, advance the cursor, then copy the values in.
        chunk = self.data[self._start : end]
        self._start = end
        shaped_view = chunk.view(tensor.shape)
        shaped_view.copy_(tensor)
        return shaped_view

    def get_data(self):
        """Return the portion of the buffer that is currently in use."""
        if self.track_usage:
            self.in_use_value += float(self._start)
            self.total_value += float(self.numel)
        return self.data[: self._start]

    def print_average_usage(self):
        """Print time-averaged buffer utilization (higher is better)."""
        assert self.track_usage, "You need to enable track usage."
        if torch.distributed.get_rank() == 0:
            print(
                " > usage of {} memory buffer: {:.2f} %".format(
                    self.name, self.in_use_value * 100.0 / self.total_value
                ),
                flush=True,
            )
class RingMemBuffer:
    """Fixed-size ring of `MemoryBuffer`s handed out in round-robin order."""

    def __init__(self, name, num_buffers, numel, dtype, track_usage):
        self.num_buffers = num_buffers
        self.buffers = []
        for i in range(num_buffers):
            buffer_name = name + " {}".format(i)
            self.buffers.append(
                allocate_mem_buff(buffer_name, numel, dtype, track_usage)
            )
        # Cursor into the ring; advanced before each hand-out.
        self._index = -1

    def get_next_buffer(self):
        """Advance to the next buffer in the ring and return it.

        Raises AssertionError when that buffer is still in use.
        """
        self._index = (self._index + 1) % self.num_buffers
        buff = self.buffers[self._index]
        assert not buff.is_in_use(), "buffer is already in use."
        return buff
|
apex-master
|
apex/transformer/tensor_parallel/memory.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model parallel utility interface."""
from apex.transformer.tensor_parallel.cross_entropy import vocab_parallel_cross_entropy
from apex.transformer.tensor_parallel.data import broadcast_data
from apex.transformer.tensor_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
VocabParallelEmbedding,
set_tensor_model_parallel_attributes,
set_defaults_if_not_set_tensor_model_parallel_attributes,
copy_tensor_model_parallel_attributes,
)
from apex.transformer.tensor_parallel.mappings import (
copy_to_tensor_model_parallel_region,
gather_from_tensor_model_parallel_region,
reduce_from_tensor_model_parallel_region,
scatter_to_tensor_model_parallel_region,
scatter_to_sequence_parallel_region,
)
from .random import (
checkpoint,
get_cuda_rng_tracker,
init_checkpointed_activations_memory_buffer,
model_parallel_cuda_manual_seed,
reset_checkpointed_activations_memory_buffer,
)
from apex.transformer.tensor_parallel.utils import split_tensor_along_last_dim
__all__ = [
# cross_entropy.py
"vocab_parallel_cross_entropy",
# data.py
"broadcast_data",
# layers.py
"ColumnParallelLinear",
"RowParallelLinear",
"VocabParallelEmbedding",
"set_tensor_model_parallel_attributes",
"set_defaults_if_not_set_tensor_model_parallel_attributes",
"copy_tensor_model_parallel_attributes",
# mappings.py
"copy_to_tensor_model_parallel_region",
"gather_from_tensor_model_parallel_region",
"reduce_from_tensor_model_parallel_region",
"scatter_to_tensor_model_parallel_region",
"scatter_to_sequence_parallel_region",
# random.py
"checkpoint",
"get_cuda_rng_tracker",
"init_checkpointed_activations_memory_buffer",
"model_parallel_cuda_manual_seed",
"reset_checkpointed_activations_memory_buffer",
# utils.py
"split_tensor_along_last_dim",
]
|
apex-master
|
apex/transformer/tensor_parallel/__init__.py
|
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(mkozuki): This file is based on megatron-lm/mpu/random.py with some differences:
# - Not using "viewless" tensor:
# - _kernel_make_viewless_tensor
# - MakeViewlessTensor
# - make_viewless_tensor
# - assert_viewless_tensor
# - safely_set_viewless_tensor_data
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import contextlib
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from torch.utils.checkpoint import detach_variable
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.tensor_parallel.memory import allocate_mem_buff
from apex.transformer.utils import split_tensor_into_1d_equal_chunks
from apex.transformer.utils import gather_split_1d_tensor
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = "model-parallel-rng"
# TODO(mkozuki): Remove `_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER` as megatron-lm doesn't seem to use.
# Whether apply model parallelism to checkpointed hidden states.
_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
# TODO(mkozuki): Remove `init_checkpointed_activations_memory_buffer` as megatron-lm doesn't seem to use.
def init_checkpointed_activations_memory_buffer(
    micro_batch_size,
    max_position_embeddings,
    hidden_size,
    num_layers,
    tensor_model_parallel_size,
    checkpoint_num_layers,
    fp16,
):
    """Initialize the memory buffer for the checkpointed activations.

    Sizes one global buffer to hold the activations of every checkpointed
    layer chunk for a single micro batch, sharded over the
    tensor-model-parallel group. May only be called once per process.
    """
    assert (
        num_layers % checkpoint_num_layers == 0
    ), "number of layers is not divisible by checkpoint-num-layers"
    # Elements needed per transformer layer, split across the TP group.
    per_layer = (
        micro_batch_size
        * max_position_embeddings
        * hidden_size
        // tensor_model_parallel_size
    )
    num_checkpointer_layers = num_layers // checkpoint_num_layers
    numel = per_layer * num_checkpointer_layers
    dtype = torch.half if fp16 else torch.float
    global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
    assert (
        _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None
    ), "checkpointed activations memory buffer is already allocated."
    _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff(
        "checkpointed activations", numel, dtype, track_usage=False
    )
# TODO(mkozuki): Remove `reset_checkpointed_activations_memory_buffer` as megatron-lm doesn't seem to use.
def reset_checkpointed_activations_memory_buffer():
    """Rewind the checkpointed-activations buffer so it can be reused.

    No-op when the buffer was never allocated.
    """
    buf = _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
    if buf is not None:
        buf.reset()
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
        device: CUDA device to set the state on; -1 means the current device.

    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, "_cuda_setRNGState") and callable(_C._cuda_setRNGState):
        # older PyTorch: set the state through the private C binding.
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)

    else:
        # newer PyTorch: normalize `device` to a torch.device, then set the
        # state on that device's default generator.
        if device == -1:
            device = torch.device("cuda")
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device("cuda", device)

        def cb():
            # Resolve the concrete device index lazily, at callback time.
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)

    # Defer until CUDA is initialized; runs immediately if it already is.
    _lazy_call(cb)
class CudaRNGStatesTracker:
    """Bookkeeping for named cuda RNG states.

    `add` snapshots a cuda rng state seeded with a given value under a
    name; `fork` temporarily switches to a named state and restores the
    previous state on exit.
    """

    def __init__(self):
        # name -> cuda rng state snapshot.
        self.states_ = {}
        # Seeds seen so far; guards against seeding two states identically.
        self.seeds_ = set()

    def reset(self):
        """Drop all tracked states and seeds."""
        self.states_ = {}
        self.seeds_ = set()

    def get_states(self):
        """Return a shallow copy of the name -> state mapping, so callers
        hold direct references to the states but not to our dict."""
        return dict(self.states_)

    def set_states(self, states):
        """Replace the tracked states wholesale (no validation performed)."""
        self.states_ = states

    def add(self, name, seed):
        """Create and track a new cuda rng state under `name`, seeded by `seed`."""
        if seed in self.seeds_:
            raise Exception("seed {} already exists".format(seed))
        self.seeds_.add(seed)
        if name in self.states_:
            raise Exception("cuda rng state {} already exists".format(name))
        # Seed, snapshot the resulting state, then restore the state we
        # started from so this call has no external rng side effect.
        orig_rng_state = torch.cuda.get_rng_state()
        torch.cuda.manual_seed(seed)
        self.states_[name] = torch.cuda.get_rng_state()
        _set_cuda_rng_state(orig_rng_state)

    @contextlib.contextmanager
    def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
        """Run the enclosed block under the named rng state, persisting any
        rng progress made inside and restoring the original state on exit."""
        if name not in self.states_:
            raise Exception("cuda rng state {} is not added".format(name))
        entry_state = torch.cuda.get_rng_state()
        _set_cuda_rng_state(self.states_[name])
        try:
            yield
        finally:
            # Save the advanced state for later forks, then restore.
            self.states_[name] = torch.cuda.get_rng_state()
            _set_cuda_rng_state(entry_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
    """Get cuda rng tracker (the process-wide singleton)."""
    return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    Call this after model parallel initialization instead of (and never
    together with) torch.cuda.manual_seed. Two rng states are registered:

    * default state: same within a set of tensor-model-parallel GPUs but
      different across model parallel groups — e.g. for dropout outside
      tensor-parallel regions.
    * tensor-model-parallel state: different within a group but the same
      across data-parallel replicas — e.g. for dropout inside model
      parallel regions.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
    # The default (data parallel) state keeps the original seed.
    data_parallel_seed = seed
    _CUDA_RNG_STATE_TRACKER.reset()
    torch.cuda.manual_seed(data_parallel_seed)
    _CUDA_RNG_STATE_TRACKER.add(
        _MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed
    )
# TODO (mkozuki): Move the below gradient checkpoint related features to another (new) file.
class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
    two main changes:
    1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
    2) the states in the model parallel tracker are also properly
       tracked/set/reset.
    """

    @staticmethod
    def forward(ctx, run_function, distribute_saved_activations, *args):
        """Run `run_function(*args)` without building an autograd graph,
        saving the inputs and all rng states so the forward pass can be
        replayed identically in backward."""
        ctx.run_function = run_function
        ctx.distribute_saved_activations = distribute_saved_activations
        # Copy the rng states so the recomputation in backward reproduces
        # the exact same randomness (e.g. dropout masks).
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
        with torch.no_grad():
            outputs = run_function(*args)
        # Divide hidden states across model parallel group and only keep
        # the chunk corresponding to the current rank.
        # NOTE(review): only the shape is recorded here; no distribution of
        # the saved activations is visible in this block — confirm intent.
        if ctx.distribute_saved_activations:
            ctx.input_0_shape = args[0].shape
        # Store everything.
        ctx.save_for_backward(*args)
        return outputs

    @staticmethod
    def backward(ctx, *args):
        """Replay the forward pass under the saved rng states with grad
        enabled, then backprop `args` (the output grads) through it."""
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError(
                "Checkpointing is not compatible with .grad(), "
                "please use .backward() if possible"
            )
        inputs = ctx.saved_tensors
        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = torch.cuda.get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
        # Compute the forward pass again, this time recording the graph.
        detached_inputs = detach_variable(inputs)
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)
        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        torch.autograd.backward(outputs, args)
        # Leading (None, None) matches the two non-tensor forward arguments
        # (run_function, distribute_saved_activations).
        grads = tuple(
            inp.grad if isinstance(inp, torch.Tensor) else inp
            for inp in detached_inputs
        )
        return (None, None) + grads
# NOTE(mkozuki): It doesn't look like `distribute_saved_activations` is used in apex.transformer
# but I added this change to reduce the superficial difference from Megatron-LM.
def checkpoint(function, distribute_saved_activations, *args):
    """Checkpoint a model or part of the model.
    This has been directly copied from torch.utils.checkpoint."""
    apply_args = (function, distribute_saved_activations) + args
    return CheckpointFunction.apply(*apply_args)
|
apex-master
|
apex/transformer/tensor_parallel/random.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Sequence
import torch
from apex.transformer.utils import divide
def split_tensor_along_last_dim(
    tensor: torch.Tensor,
    num_partitions: int,
    contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
    """Split a tensor into equal chunks along its last dimension.

    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions to split the tensor into;
            must divide the last dimension evenly.
        contiguous_split_chunks: If True, make each chunk contiguous
            in memory.
    """
    split_dim = tensor.dim() - 1
    chunk_size = divide(tensor.size()[split_dim], num_partitions)
    chunks = torch.split(tensor, chunk_size, dim=split_dim)
    # torch.split returns views; optionally materialize contiguous copies.
    if contiguous_split_chunks:
        return tuple(chunk.contiguous() for chunk in chunks)
    return chunks
class VocabUtility:
    """Compute the per-rank [first, last) vocabulary index range when the
    vocabulary is split into `world_size` contiguous chunks."""

    @staticmethod
    def vocab_range_from_per_partition_vocab_size(
        per_partition_vocab_size: int, rank, world_size: int
    ) -> Sequence[int]:
        start = per_partition_vocab_size * rank
        return start, start + per_partition_vocab_size

    @staticmethod
    def vocab_range_from_global_vocab_size(global_vocab_size: int, rank: int, world_size: int) -> Sequence[int]:
        per_partition_vocab_size = divide(global_vocab_size, world_size)
        return VocabUtility.vocab_range_from_per_partition_vocab_size(
            per_partition_vocab_size, rank, world_size
        )
|
apex-master
|
apex/transformer/tensor_parallel/utils.py
|
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
from typing import Optional, Dict, Tuple, List
import warnings
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.utils import divide
from apex.transformer.tensor_parallel.mappings import (
copy_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
gather_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
reduce_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
scatter_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
reduce_scatter_to_sequence_parallel_region,
)
from apex.transformer.tensor_parallel.random import get_cuda_rng_tracker
from apex.transformer.tensor_parallel.utils import VocabUtility
from apex.transformer.log_util import get_transformer_logger
# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
# version of PyTorch. The following 4 lines are for backward comparability with
# older PyTorch.
if "reduce_scatter_tensor" not in dir(torch.distributed):
torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
if "all_gather_into_tensor" not in dir(torch.distributed):
torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
_logger = get_transformer_logger(__name__)
_grad_accum_fusion_available = True
try:
import fused_weight_gradient_mlp_cuda
except ImportError:
_grad_accum_fusion_available = False
# Default values for the tensor-model-parallel attributes attached to
# parameters by `set_tensor_model_parallel_attributes` and friends below.
_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {
    "tensor_model_parallel": False,
    "partition_dim": -1,
    "partition_stride": 1,
}
def param_is_not_tensor_parallel_duplicate(param: torch.Tensor) -> bool:
    """Return True when this rank "owns" `param`: either it is marked as a
    tensor-model-parallel shard, or we are rank 0 of the group."""
    is_tp_param = getattr(param, "tensor_model_parallel", False)
    return is_tp_param or (get_tensor_model_parallel_rank() == 0)
def set_tensor_model_parallel_attributes(tensor: torch.Tensor, is_parallel: bool, dim: int, stride: int) -> None:
    """Attach the tensor-model-parallel metadata attributes to `tensor`.

    Asserts that none of the attributes are already present.
    """
    for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
        assert not hasattr(tensor, attribute)
    tensor.tensor_model_parallel = is_parallel
    tensor.partition_dim = dim
    tensor.partition_stride = stride
def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor: torch.Tensor) -> None:
    """Fill in any missing tensor-model-parallel attributes with defaults."""
    for attribute, default in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS.items():
        if not hasattr(tensor, attribute):
            setattr(tensor, attribute, default)
def copy_tensor_model_parallel_attributes(destination_tensor: torch.Tensor, source_tensor: torch.Tensor) -> None:
    """Copy the tensor-model-parallel attributes that are present on
    `source_tensor` onto `destination_tensor` (missing ones are skipped)."""
    for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
        if hasattr(source_tensor, attribute):
            value = getattr(source_tensor, attribute)
            setattr(destination_tensor, attribute, value)
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
    """Initialize affine weight for model parallel on GPU.

    Args:
        weight (Parameter): weight to initialize in place.
        init_method (Callable[[Tensor], None]): initializer applied to `weight`.
        partition_dim (int): dimension along which `weight` is partitioned.
        stride (int): partition stride recorded on the parameter.
    """
    set_tensor_model_parallel_attributes(
        tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
    )
    # Fork the model-parallel rng so each rank draws its own shard's values.
    with get_cuda_rng_tracker().fork():
        init_method(weight)
# TODO (mkozuki): Re-consider removing params_dtype from arguments to make this
# more parallel with _initialize_affine_weight_gpu
def _initialize_affine_weight_cpu(
    weight,
    output_size,
    input_size,
    per_partition_size,
    partition_dim,
    init_method,
    stride=1,
    return_master_weight=False,
    *,
    params_dtype=torch.float32,
):
    """Initialize affine weight for model parallel on CPU.

    The full (master) weight is built identically on every process, then
    this rank's strided slices are copied into `weight`.

    Returns the master weight when `return_master_weight` is True, else None.
    """
    set_tensor_model_parallel_attributes(
        tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
    )
    # Build the full weight in fp32, then cast to the requested dtype.
    full_weight = torch.empty(
        output_size, input_size, dtype=torch.float, requires_grad=False
    )
    init_method(full_weight)
    full_weight = full_weight.to(dtype=params_dtype)
    # Chop the master weight into per-stride slices and keep this rank's.
    slice_size = divide(per_partition_size, stride)
    all_slices = torch.split(full_weight, slice_size, dim=partition_dim)
    rank = get_tensor_model_parallel_rank()
    world_size = get_tensor_model_parallel_world_size()
    my_slices = all_slices[rank::world_size]
    with torch.no_grad():
        torch.cat(my_slices, dim=partition_dim, out=weight)
    return full_weight if return_master_weight else None
class VocabParallelEmbedding(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.

    This is mainly adapted from torch.nn.Embedding and all the default
    values are kept.

    Arguments:
        num_embeddings: vocabulary size.
        embedding_dim: size of hidden state.
        init_method: method to initialize weights.

    Keyword Arguments:
        params_dtype: dtype of the weight parameter.
        use_cpu_initialization: build and initialize the master weight on
            CPU instead of on the current CUDA device.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        init_method=init.xavier_normal_,
        *,
        params_dtype: torch.dtype=torch.float32,
        use_cpu_initialization: bool = False,
    ):
        super().__init__()
        # Keep the input dimensions.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Set the defaults for compatibility with torch.nn.Embedding.
        self.padding_idx = None
        self.max_norm = None
        self.norm_type = 2.0
        self.scale_grad_by_freq = False
        self.sparse = False
        self._weight = None
        self.tensor_model_parallel_size = get_tensor_model_parallel_world_size()
        # Divide the weight matrix along the vocabulary dimension: this rank
        # owns vocab ids in [vocab_start_index, vocab_end_index).
        (
            self.vocab_start_index,
            self.vocab_end_index,
        ) = VocabUtility.vocab_range_from_global_vocab_size(
            self.num_embeddings,
            get_tensor_model_parallel_rank(),
            self.tensor_model_parallel_size,
        )
        self.num_embeddings_per_partition = (
            self.vocab_end_index - self.vocab_start_index
        )
        # Allocate weights and initialize.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(
                    self.num_embeddings_per_partition,
                    self.embedding_dim,
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_cpu(
                self.weight,
                self.num_embeddings,
                self.embedding_dim,
                self.num_embeddings_per_partition,
                0,
                init_method,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.num_embeddings_per_partition,
                    self.embedding_dim,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(
                self.weight, init_method, partition_dim=0, stride=1
            )

    def forward(self, input_):
        """Look up embeddings for `input_` (global vocab ids) and all-reduce
        the per-rank partial results across the tensor-model-parallel group."""
        if self.tensor_model_parallel_size > 1:
            # Build the mask of ids owned by other ranks.
            input_mask = (input_ < self.vocab_start_index) | (
                input_ >= self.vocab_end_index
            )
            # Mask the input: out-of-range ids are clamped to local index 0;
            # their rows are zeroed out below before the all-reduce.
            masked_input = input_.clone() - self.vocab_start_index
            masked_input[input_mask] = 0
        else:
            masked_input = input_
        # Get the embeddings.
        output_parallel = F.embedding(
            masked_input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        # Mask the output embedding so ids owned by other ranks contribute
        # zero from this rank.
        if self.tensor_model_parallel_size > 1:
            output_parallel[input_mask, :] = 0.0
        # Reduce across all the model parallel GPUs.
        output = reduce_from_tensor_model_parallel_region(output_parallel)
        return output
class LinearWithGradAccumulationAndAsyncCommunication(torch.autograd.Function):
    """Linear layer execution with asynchronous communication and gradient accumulation fusion in backprop.

    Forward computes ``Y = X W^T (+ bias)``. With ``sequence_parallel_enabled``,
    ``input`` holds only this rank's sequence shard and is all-gathered first.
    Backward overlaps collectives (all-gather / all-reduce / reduce-scatter)
    with the gradient GEMMs.
    """
    @staticmethod
    def forward(
        ctx,
        input: torch.Tensor,
        weight: torch.Tensor,
        bias: Optional[torch.Tensor],
        gradient_accumulation_fusion: bool,
        async_grad_allreduce: bool,
        sequence_parallel_enabled: bool,
        use_16bit_in_wgrad_accum_fusion: Optional[bool] = None,
    ):
        # NOTE(review): bias grad is only produced when the weight also requires
        # grad, because `use_bias` is ANDed with `weight.requires_grad`.
        ctx.use_bias = bias is not None and weight.requires_grad
        ctx.gradient_accumulation_fusion = gradient_accumulation_fusion
        ctx.async_grad_allreduce = async_grad_allreduce
        ctx.sequence_parallel_enabled = sequence_parallel_enabled
        ctx.compute_weight_gradient = weight.requires_grad
        if use_16bit_in_wgrad_accum_fusion is not None:
            warnings.warn(
                "Deprecated option `use_16bit_in_wgrad_accum_fusion` "
                f"is set to {use_16bit_in_wgrad_accum_fusion}"
            )
        # `input` is only needed in backward when a weight gradient is computed.
        if ctx.compute_weight_gradient:
            ctx.save_for_backward(input, weight)
        else:
            ctx.save_for_backward(weight)
        if ctx.sequence_parallel_enabled:
            world_size = get_tensor_model_parallel_world_size()
            # `input` is supposed to be 3D and its order of dimension is [sequence, batch, hidden]
            shape = list(input.shape)
            shape[0] *= world_size
            all_gather_buffer = torch.empty(
                shape,
                dtype=input.dtype,
                device=torch.cuda.current_device(),
                requires_grad=False,
            )
            # Gather the full sequence before the GEMM.
            torch.distributed.all_gather_into_tensor(all_gather_buffer, input, group=get_tensor_model_parallel_group())
            total_input = all_gather_buffer
        else:
            total_input = input
        output = torch.matmul(total_input, weight.t())
        if bias is not None:
            output = output + bias
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # Returns one gradient per forward argument: (input, weight, bias) plus
        # None for each of the four non-tensor flags.
        if ctx.compute_weight_gradient:
            input, weight = ctx.saved_tensors
        else:
            weight = ctx.saved_tensors[0]
            input = None
        use_bias = ctx.use_bias
        #only get sequence parallel inputs if need to calculate weight grad
        handle = None
        if ctx.compute_weight_gradient:
            if ctx.sequence_parallel_enabled:
                world_size = get_tensor_model_parallel_world_size()
                shape = list(input.shape)
                shape[0] *= world_size
                all_gather_buffer = torch.empty(
                    shape,
                    dtype=input.dtype,
                    device=torch.cuda.current_device(),
                    requires_grad=False,
                )
                # Async all-gather overlaps with the grad_input GEMM below.
                handle = torch.distributed.all_gather_into_tensor(
                    all_gather_buffer,
                    input,
                    group=get_tensor_model_parallel_group(),
                    async_op=True,
                )
                total_input = all_gather_buffer
            else:
                total_input = input
        grad_input = grad_output.matmul(weight)
        if handle is not None:
            handle.wait()
        if ctx.async_grad_allreduce:
            # Asynchronous all-reduce; overlaps with the weight-grad work below.
            handle = torch.distributed.all_reduce(
                grad_input, group=get_tensor_model_parallel_group(), async_op=True
            )
        #if no weight gradient, immediately return
        if not ctx.compute_weight_gradient:
            if ctx.sequence_parallel_enabled:
                assert not ctx.async_grad_allreduce
                world_size = get_tensor_model_parallel_world_size()
                shape = list(grad_input.shape)
                shape[0] //= world_size
                # Reduce-scatter the full-sequence grad back to this rank's shard.
                sub_grad_input = torch.empty(torch.Size(shape), dtype=grad_input.dtype, device=torch.cuda.current_device(), requires_grad=False)
                handle = torch.distributed.reduce_scatter_tensor(
                    sub_grad_input,
                    grad_input,
                    group=get_tensor_model_parallel_group(),
                    async_op=True
                )
                handle.wait()
                return sub_grad_input, None, None, None, None, None, None
            if ctx.async_grad_allreduce:
                handle.wait()
            return grad_input, None, None, None, None, None, None
        # Convert the tensor shapes to 2D for execution compatibility
        grad_output = grad_output.contiguous()
        grad_output = grad_output.view(
            grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2]
        )
        total_input = total_input.view(total_input.shape[0] * total_input.shape[1], total_input.shape[2])
        if ctx.sequence_parallel_enabled:
            assert not ctx.async_grad_allreduce
            # Async reduce-scatter overlaps with the weight-grad GEMM below.
            sub_grad_input = torch.empty(input.shape, dtype=input.dtype, device=torch.cuda.current_device(), requires_grad=False)
            handle = torch.distributed.reduce_scatter_tensor(
                sub_grad_input,
                grad_input,
                group=get_tensor_model_parallel_group(),
                async_op=True
            )
        if ctx.gradient_accumulation_fusion:
            # Fused path: accumulate directly into `weight.main_grad` and
            # return grad_weight=None.
            if not hasattr(weight, "main_grad"):
                raise RuntimeError("attempted to perform gradient accumulation fusion on param without setting main_grad")
            if weight.main_grad.dtype == torch.float32:
                fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(
                    total_input, grad_output, weight.main_grad
                )
            elif weight.main_grad.dtype in (torch.float16, torch.bfloat16):
                fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(
                    total_input, grad_output, weight.main_grad
                )
            else:
                raise RuntimeError(f"unsupported dtype for main_grad ({weight.main_grad.dtype})")
            grad_weight = None
        else:
            grad_weight = grad_output.t().matmul(total_input)
        grad_bias = grad_output.sum(dim=0) if use_bias else None
        if ctx.sequence_parallel_enabled:
            handle.wait()
            return sub_grad_input, grad_weight, grad_bias, None, None, None, None
        if ctx.async_grad_allreduce:
            handle.wait()
        return grad_input, grad_weight, grad_bias, None, None, None, None
def linear_with_grad_accumulation_and_async_allreduce(
    input: torch.Tensor,
    weight: torch.Tensor,
    bias: Optional[torch.Tensor],
    gradient_accumulation_fusion: bool,
    async_grad_allreduce: bool,
    sequence_parallel_enabled: bool,
) -> torch.Tensor:
    """Run `LinearWithGradAccumulationAndAsyncCommunication` with autocast disabled.

    Arguments are cast once up front (matching the ambient autocast dtype) so the
    custom autograd function itself executes outside of autocast.
    """
    casted_args = _cast_if_autocast_enabled(
        input,
        weight,
        bias,
        gradient_accumulation_fusion,
        async_grad_allreduce,
        sequence_parallel_enabled,
    )
    with torch.cuda.amp.autocast(enabled=False):
        return LinearWithGradAccumulationAndAsyncCommunication.apply(*casted_args)
class ColumnParallelLinear(torch.nn.Module):
    """Linear layer with column parallelism.
    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].
    .. note::
        Input is supposed to be three dimensional and each dimension
        is expected to be sequence, batch, and hidden feature, respectively.
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs, otherwise, every GPU will have its output
                       which is Y_i = XA_i
        init_method: method to initialize weights. Note that bias is always set
                     to zero.
        stride: For the strided linear layers.
        keep_master_weight_for_test: This was added for testing and should be
                                     set to False. It returns the master weights
                                     used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
    Keyword Arguments:
        no_async_tensor_model_parallel_allreduce: if True, do not overlap the
            input-gradient all-reduce with the weight-gradient GEMM in backward.
        params_dtype: dtype used to allocate the weight/bias parameters.
        use_cpu_initialization: allocate and initialize the master weight on CPU.
        gradient_accumulation_fusion: accumulate weight gradients directly into
            `weight.main_grad` via the fused CUDA extension.
        sequence_parallel_enabled: input/output are sharded along the sequence dim.
        accumulation_in_fp16: Deprecated
    """
    def __init__(
        self,
        input_size,
        output_size,
        bias=True,
        gather_output=True,
        init_method=init.xavier_normal_,
        stride=1,
        keep_master_weight_for_test=False,
        skip_bias_add=False,
        *,
        no_async_tensor_model_parallel_allreduce=False,
        params_dtype=torch.float32,
        use_cpu_initialization=False,
        gradient_accumulation_fusion=False,
        sequence_parallel_enabled: bool = False,
        accumulation_in_fp16: Optional[bool] = None,
    ):
        super().__init__()
        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.gather_output = gather_output
        # Divide the weight matrix along the last dimension.
        world_size = get_tensor_model_parallel_world_size()
        self.output_size_per_partition = divide(output_size, world_size)
        self.skip_bias_add = skip_bias_add
        if accumulation_in_fp16 is not None:
            warnings.warn(
                f"Deprecated option `accumulation_in_fp16` is set to {accumulation_in_fp16}"
            )
        # Parameters.
        # Note: torch.nn.functional.linear performs XA^T + b and as a result
        # we allocate the transpose.
        # Initialize weight.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(self.output_size_per_partition, self.input_size, dtype=params_dtype)
            )
            # Master weight is only retained when requested for testing.
            self.master_weight = _initialize_affine_weight_cpu(
                self.weight,
                self.output_size,
                self.input_size,
                self.output_size_per_partition,
                0,
                init_method,
                stride=stride,
                return_master_weight=keep_master_weight_for_test,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.output_size_per_partition,
                    self.input_size,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(self.weight, init_method, partition_dim=0, stride=stride)
        if bias:
            # Bias is partitioned along the output dimension, like the weight rows.
            if use_cpu_initialization:
                self.bias = Parameter(torch.empty(self.output_size_per_partition, dtype=params_dtype))
            else:
                self.bias = Parameter(
                    torch.empty(
                        self.output_size_per_partition,
                        device=torch.cuda.current_device(),
                        dtype=params_dtype,
                    )
                )
            set_tensor_model_parallel_attributes(self.bias, True, 0, stride)
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
        else:
            self.register_parameter("bias", None)
        # Overlapping the grad all-reduce only makes sense with more than 1 rank.
        self.async_tensor_model_parallel_allreduce = (
            not no_async_tensor_model_parallel_allreduce and world_size > 1
        )
        if sequence_parallel_enabled:
            if world_size <= 1:
                warnings.warn(
                    f"`sequence_parallel_enabled` is set to `True`, but got world_size of {world_size}"
                )
                # sequence_parallel_enabled = False
        self.sequence_parallel_enabled = sequence_parallel_enabled
        if gradient_accumulation_fusion:
            if not _grad_accum_fusion_available:
                # Basically, apex.transformer module users are expected to install APEX's
                # `--cpp_ext` and `--cuda_ext`. The example installation command is as follows:
                # `pip install --global-option="--cpp_ext" --global-option="--cuda_ext ."
                # at the root of APEX repository.
                warnings.warn(
                    "`gradient_accumulation_fusion` is set to `True` but "
                    "the custom CUDA extension of `fused_weight_gradient_mlp_cuda` module not "
                    "found. Thus `gradient_accumulation_fusion` set to `False`. "
                    "Note that the extension requires CUDA>=11."
                )
                gradient_accumulation_fusion = False
        self.gradient_accumulation_fusion = gradient_accumulation_fusion
        if self.async_tensor_model_parallel_allreduce and self.sequence_parallel_enabled:
            raise RuntimeError("`async_tensor_model_parallel_allreduce` and `sequence_parallel_enabled` cannot be enabled at the same time.")
        self._forward_impl = linear_with_grad_accumulation_and_async_allreduce
    def forward(self, input_: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Forward of ColumnParallelLinear
        Args:
            input_: 3D tensor whose order of dimension is [sequence, batch, hidden]
        Returns:
            - output
            - bias
        """
        # When skip_bias_add is requested, bias is returned instead of added here.
        bias = self.bias if not self.skip_bias_add else None
        if self.async_tensor_model_parallel_allreduce or self.sequence_parallel_enabled:
            # The copy/all-gather is handled inside the autograd function.
            input_parallel = input_
        else:
            input_parallel = copy_to_tensor_model_parallel_region(input_)
        # Matrix multiply.
        output_parallel = self._forward_impl(
            input=input_parallel,
            weight=self.weight,
            bias=bias,
            gradient_accumulation_fusion=self.gradient_accumulation_fusion,
            async_grad_allreduce=self.async_tensor_model_parallel_allreduce,
            sequence_parallel_enabled=self.sequence_parallel_enabled,
        )
        if self.gather_output:
            # All-gather across the partitions.
            assert not self.sequence_parallel_enabled
            output = gather_from_tensor_model_parallel_region(output_parallel)
        else:
            output = output_parallel
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias
class RowParallelLinear(torch.nn.Module):
    """Linear layer with row parallelism.
    The linear layer is defined as Y = XA + b. A is parallelized along
    its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
    .. note::
        Input is supposed to be three dimensional and each dimension
        is expected to be sequence, batch, and hidden feature, respectively.
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias. Note that bias is not parallelized.
        input_is_parallel: If true, we assume that the input is already
                           split across the GPUs and we do not split
                           again.
        init_method: method to initialize weights. Note that bias is always set
                     to zero.
        stride: For the strided linear layers.
        keep_master_weight_for_test: This was added for testing and should be
                                     set to False. It returns the master weights
                                     used for initialization.
        skip_bias_add: This was added to enable performance optimization where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
    Keyword Arguments:
        params_dtype: dtype used to allocate the weight/bias parameters.
        use_cpu_initialization: allocate and initialize the master weight on CPU.
        gradient_accumulation_fusion: accumulate weight gradients directly into
            `weight.main_grad` via the fused CUDA extension.
        sequence_parallel_enabled: output is reduce-scattered along the sequence
            dimension (requires `input_is_parallel`).
        accumulation_in_fp16: Deprecated
    """
    def __init__(
        self,
        input_size,
        output_size,
        bias=True,
        input_is_parallel=False,
        init_method=init.xavier_normal_,
        stride=1,
        keep_master_weight_for_test=False,
        skip_bias_add=False,
        *,
        params_dtype=torch.float32,
        use_cpu_initialization=False,
        gradient_accumulation_fusion=False,
        sequence_parallel_enabled: bool = False,
        accumulation_in_fp16: Optional[bool] = None,
    ):
        super().__init__()
        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.input_is_parallel = input_is_parallel
        # Divide the weight matrix along the last dimension.
        world_size = get_tensor_model_parallel_world_size()
        self.input_size_per_partition = divide(input_size, world_size)
        self.skip_bias_add = skip_bias_add
        self.gradient_accumulation_fusion = gradient_accumulation_fusion
        self.sequence_parallel_enabled = sequence_parallel_enabled
        if self.sequence_parallel_enabled and not self.input_is_parallel:
            raise RuntimeError("To enable `sequence_parallel_enabled`, `input_is_parallel` must be `True`")
        if accumulation_in_fp16 is not None:
            warnings.warn(
                f"Deprecated option `accumulation_in_fp16` is set to {accumulation_in_fp16}"
            )
        # Parameters.
        # Note: torch.nn.functional.linear performs XA^T + b and as a result
        # we allocate the transpose.
        # Initialize weight.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(
                    self.output_size, self.input_size_per_partition, dtype=params_dtype
                )
            )
            # Master weight is only retained when requested for testing.
            self.master_weight = _initialize_affine_weight_cpu(
                self.weight,
                self.output_size,
                self.input_size,
                self.input_size_per_partition,
                1,
                init_method,
                stride=stride,
                return_master_weight=keep_master_weight_for_test,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.output_size,
                    self.input_size_per_partition,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(
                self.weight, init_method, partition_dim=1, stride=stride
            )
        if bias:
            # Bias is replicated (not partitioned) on every rank.
            if use_cpu_initialization:
                self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
            else:
                self.bias = Parameter(
                    torch.empty(
                        self.output_size,
                        device=torch.cuda.current_device(),
                        dtype=params_dtype,
                    )
                )
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
            setattr(self.bias, "sequence_parallel_enabled", sequence_parallel_enabled)
        else:
            self.register_parameter("bias", None)
        self._forward_impl = linear_with_grad_accumulation_and_async_allreduce
    def forward(self, input_: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Forward of RowParallelLinear
        Args:
            input_: 3D tensor whose order of dimension is [sequence, batch, hidden]
        Returns:
            - output
            - bias
        """
        # Set up backprop all-reduce.
        if self.input_is_parallel:
            input_parallel = input_
        else:
            assert not self.sequence_parallel_enabled
            input_parallel = scatter_to_tensor_model_parallel_region(input_)
        # Matrix multiply.
        # async_grad_allreduce/sequence_parallel are False here because the
        # output reduction is done explicitly below.
        output_parallel = self._forward_impl(
            input=input_parallel,
            weight=self.weight,
            bias=None,
            gradient_accumulation_fusion=self.gradient_accumulation_fusion,
            async_grad_allreduce=False,
            sequence_parallel_enabled=False,
        )
        # All-reduce across all the partitions.
        if self.sequence_parallel_enabled:
            output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)
        else:
            output_ = reduce_from_tensor_model_parallel_region(output_parallel)
        if not self.skip_bias_add:
            output = output_ + self.bias if self.bias is not None else output_
            output_bias = None
        else:
            output = output_
            output_bias = self.bias
        return output, output_bias
|
apex-master
|
apex/transformer/tensor_parallel/layers.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_src_rank
_MAX_DATA_DIM = 5
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert data[key].dtype == target_dtype, (
"{} has data type {} which "
"is different than {}".format(key, data[key].dtype, target_dtype)
)
def _build_key_size_numel_dictionaries(keys, data):
    """Build the size on rank 0 and broadcast.

    Returns (key -> list of dims, key -> numel, total numel) identically on every
    rank of the tensor-model-parallel group; only rank 0 reads `data`.
    """
    max_dim = _MAX_DATA_DIM
    # One max_dim-sized slot of dims per key, flattened for a single broadcast.
    sizes = [0 for _ in range(max_dim) for _ in keys]
    # Pack the sizes on rank zero.
    if get_tensor_model_parallel_rank() == 0:
        offset = 0
        for key in keys:
            assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM"
            size = data[key].size()
            for i, s in enumerate(size):
                sizes[i + offset] = s
            offset += max_dim
    # Move to GPU and broadcast.
    sizes_cuda = torch.cuda.LongTensor(sizes)
    torch.distributed.broadcast(
        sizes_cuda,
        get_tensor_model_parallel_src_rank(),
        group=get_tensor_model_parallel_group(),
    )
    # Move back to cpu and unpack.
    sizes_cpu = sizes_cuda.cpu()
    key_size = {}
    key_numel = {}
    total_numel = 0
    offset = 0
    for key in keys:
        i = 0
        size = []
        numel = 1
        # Unused dim slots are zero-padded, so stop at the first 0 entry.
        while sizes_cpu[offset + i] > 0:
            this_size = sizes_cpu[offset + i]
            size.append(this_size)
            numel *= this_size
            i += 1
        key_size[key] = size
        key_numel[key] = numel
        total_numel += numel
        offset += max_dim
    return key_size, key_numel, total_numel
def broadcast_data(keys, data, datatype):
    """Broadcast data from rank zero of each model parallel group to the
    members of the same model parallel group.
    Arguments:
        keys: list of keys in the data dictionary to be broadcasted
        data: data dictionary of string keys and cpu tensor values.
        datatype: torch data type of all tensors in data associated
                  with keys.
    """
    # Build (key, size) and (key, number of elements) dictionaries along
    # with the total number of elements on all ranks.
    key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data)
    # Pack on rank zero.
    if get_tensor_model_parallel_rank() == 0:
        # Check that all keys have the same data type.
        _check_data_types(keys, data, datatype)
        # Flatten the data associated with the keys
        flatten_data = torch.cat(
            [data[key].contiguous().view(-1) for key in keys], dim=0
        ).cuda()
    else:
        # Non-source ranks allocate an empty receive buffer of the same size.
        flatten_data = torch.empty(
            total_numel, device=torch.cuda.current_device(), dtype=datatype
        )
    # Broadcast
    torch.distributed.broadcast(
        flatten_data,
        get_tensor_model_parallel_src_rank(),
        group=get_tensor_model_parallel_group(),
    )
    # Unpack
    output = {}
    offset = 0
    for key in keys:
        size = key_size[key]
        numel = key_numel[key]
        # narrow() gives a zero-copy view into the flat buffer.
        output[key] = flatten_data.narrow(0, offset, numel).view(size)
        offset += numel
    return output
|
apex-master
|
apex/transformer/tensor_parallel/data.py
|
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.tensor_parallel.utils import split_tensor_along_last_dim
# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
# version of PyTorch. The following 4 lines are for backward compatibility with
# older PyTorch.
if "all_gather_into_tensor" not in dir(torch.distributed):
    torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
if "reduce_scatter_tensor" not in dir(torch.distributed):
    torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base
def _reduce(input_: torch.Tensor) -> torch.Tensor:
    """All-reduce `input_` in place across the tensor-model-parallel group."""
    world_size = get_tensor_model_parallel_world_size()
    # A single-rank group has nothing to reduce.
    if world_size == 1:
        return input_
    torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
    return input_
def _split_along_last_dim(input_: torch.Tensor) -> torch.Tensor:
    """Split along the last dimension and keep the chunk owned by this rank."""
    world_size = get_tensor_model_parallel_world_size()
    # Single GPU: nothing to split.
    if world_size == 1:
        return input_
    chunks = split_tensor_along_last_dim(input_, world_size)
    # torch.split views are not necessarily contiguous.
    return chunks[get_tensor_model_parallel_rank()].contiguous()
def _split_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
    """Split along the first dimension and keep the slice owned by this rank."""
    world_size = get_tensor_model_parallel_world_size()
    # Single GPU in the tensor-model-parallel group: nothing to split.
    if world_size == 1:
        return input_
    total = input_.size(0)
    assert total % world_size == 0
    per_rank = total // world_size
    start = get_tensor_model_parallel_rank() * per_rank
    return input_[start:start + per_rank].contiguous()
def _gather_along_last_dim(input_: torch.Tensor) -> torch.Tensor:
    """Gather from all ranks and concatenate along the last dimension."""
    world_size = get_tensor_model_parallel_world_size()
    # Single GPU: nothing to gather.
    if world_size == 1:
        return input_
    rank = get_tensor_model_parallel_rank()
    gathered = [torch.empty_like(input_) for _ in range(world_size)]
    # Pre-place our own shard; all_gather fills in the rest.
    gathered[rank] = input_
    torch.distributed.all_gather(
        gathered, input_, group=get_tensor_model_parallel_group()
    )
    # torch.cat already yields a contiguous result.
    return torch.cat(gathered, dim=input_.dim() - 1).contiguous()
def _gather_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
    """Gather from all ranks and concatenate along the first dimension."""
    world_size = get_tensor_model_parallel_world_size()
    # Single GPU: nothing to gather.
    if world_size == 1:
        return input_
    out_shape = list(input_.shape)
    out_shape[0] *= world_size
    gathered = torch.empty(
        out_shape, dtype=input_.dtype, device=torch.cuda.current_device()
    )
    torch.distributed.all_gather_into_tensor(
        gathered, input_.contiguous(), group=get_tensor_model_parallel_group()
    )
    return gathered
def _reduce_scatter_along_first_dim(input_: torch.Tensor) -> torch.Tensor:
    """Reduce-scatter across the model parallel group along the first dimension."""
    world_size = get_tensor_model_parallel_world_size()
    # Single GPU: the reduce-scatter is the identity.
    if world_size == 1:
        return input_
    out_shape = list(input_.shape)
    assert out_shape[0] % world_size == 0
    out_shape[0] //= world_size
    scattered = torch.empty(
        out_shape, dtype=input_.dtype, device=torch.cuda.current_device()
    )
    torch.distributed.reduce_scatter_tensor(
        scattered, input_.contiguous(), group=get_tensor_model_parallel_group()
    )
    return scattered
class _CopyToModelParallelRegion(torch.autograd.Function):
    """Pass the input to the tensor model parallel region.

    Identity in forward; all-reduce of the gradient in backward, because the
    same activation is consumed by every tensor-parallel rank.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return input_
    @staticmethod
    def forward(ctx, input_):
        return input_
    @staticmethod
    def backward(ctx, grad_output):
        return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
    """All-reduce the input from the tensor model parallel region.

    All-reduce in forward; identity in backward (the gradient is already
    replicated across ranks).
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return _reduce(input_)
    @staticmethod
    def forward(ctx, input_):
        return _reduce(input_)
    @staticmethod
    def backward(ctx, grad_output):
        return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
    """Split the input and keep only the corresponding chunk to the rank.

    Last-dim split in forward; last-dim all-gather of the gradient in backward.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return _split_along_last_dim(input_)
    @staticmethod
    def forward(ctx, input_):
        return _split_along_last_dim(input_)
    @staticmethod
    def backward(ctx, grad_output):
        return _gather_along_last_dim(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
    """Gather the input from tensor model parallel region and concatenate.

    Last-dim all-gather in forward; last-dim split of the gradient in backward.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return _gather_along_last_dim(input_)
    @staticmethod
    def forward(ctx, input_):
        return _gather_along_last_dim(input_)
    @staticmethod
    def backward(ctx, grad_output):
        return _split_along_last_dim(grad_output)
class _ScatterToSequenceParallelRegion(torch.autograd.Function):
    """Split the input and keep only the corresponding chunk to the rank.

    First-(sequence-)dim split in forward; first-dim all-gather of the gradient
    in backward.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return _split_along_first_dim(input_)
    @staticmethod
    def forward(ctx, input_):
        return _split_along_first_dim(input_)
    @staticmethod
    def backward(ctx, grad_output):
        return _gather_along_first_dim(grad_output)
class _GatherFromSequenceParallelRegion(torch.autograd.Function):
    """Gather the input from sequence parallel region and concatenate.

    First-dim all-gather in forward. Backward depends on `to_model_parallel`:
    reduce-scatter when the result feeds the tensor-model-parallel region
    (gradients arrive summed over ranks), plain first-dim split otherwise.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_, to_model_parallel: bool = True):
        return _gather_along_first_dim(input_)
    @staticmethod
    def forward(ctx, input_, to_model_parallel: bool = True):
        ctx.to_model_parallel = to_model_parallel
        return _gather_along_first_dim(input_)
    @staticmethod
    def backward(ctx, grad_output):
        # Second None matches the non-tensor `to_model_parallel` argument.
        if ctx.to_model_parallel:
            return _reduce_scatter_along_first_dim(grad_output), None
        else:
            return _split_along_first_dim(grad_output), None
class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function):
    """Reduce scatter the input from the sequence parallel region and concatenate.

    First-dim reduce-scatter in forward; first-dim all-gather of the gradient
    in backward.
    """
    # FIXME(mkozuki): Definition of static symbolic methods don't look correct according to
    # https://pytorch.org/docs/stable/onnx.html#static-symbolic-method
    @staticmethod
    def symbolic(graph, input_):
        return _reduce_scatter_along_first_dim(input_)
    @staticmethod
    def forward(ctx, input_):
        return _reduce_scatter_along_first_dim(input_)
    @staticmethod
    def backward(ctx, grad_output):
        return _gather_along_first_dim(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """Identity in forward; all-reduces the gradient in backward."""
    return _CopyToModelParallelRegion.apply(input_)
def reduce_from_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """All-reduces in forward; identity gradient in backward."""
    return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """Splits along the last dim in forward; all-gathers the gradient in backward."""
    return _ScatterToModelParallelRegion.apply(input_)
def gather_from_tensor_model_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """All-gathers along the last dim in forward; splits the gradient in backward."""
    return _GatherFromModelParallelRegion.apply(input_)
def scatter_to_sequence_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """Splits along the sequence (first) dim in forward; all-gathers the gradient."""
    return _ScatterToSequenceParallelRegion.apply(input_)
def gather_from_sequence_parallel_region(input_: torch.Tensor, to_model_parallel: bool = True) -> torch.Tensor:
    """All-gathers along the sequence dim; backward reduce-scatters (or splits)."""
    return _GatherFromSequenceParallelRegion.apply(input_, to_model_parallel)
def reduce_scatter_to_sequence_parallel_region(input_: torch.Tensor) -> torch.Tensor:
    """Reduce-scatters along the sequence dim in forward; all-gathers the gradient."""
    return _ReduceScatterToSequenceParallelRegion.apply(input_)
__all__ = [
"copy_to_tensor_model_parallel_region",
"reduce_from_tensor_model_parallel_region",
"scatter_to_tensor_model_parallel_region",
"gather_from_tensor_model_parallel_region",
"scatter_to_sequence_parallel_region",
"gather_from_sequence_parallel_region",
"reduce_scatter_to_sequence_parallel_region",
]
|
apex-master
|
apex/transformer/tensor_parallel/mappings.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from apex.transformer.layers.layer_norm import FastLayerNorm
from apex.transformer.layers.layer_norm import FusedLayerNorm
from apex.transformer.layers.layer_norm import MixedFusedLayerNorm
__all__ = [
"FastLayerNorm",
"FusedLayerNorm",
"MixedFusedLayerNorm",
]
|
apex-master
|
apex/transformer/layers/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# NOTE(mkozuki): This file defines two LayerNorm that are compatible with Megatron-LM.
# while avoiding introducing the breaking change of `"sequence_parallel_enabled"` attribute into apex.normalization.FusedLayerNorm
# and apex.contrib.layer_norm.FastLayerNorm.
import warnings
import torch
from apex.normalization import FusedLayerNorm as OrigFusedLayerNorm
from apex.normalization import MixedFusedLayerNorm as OrigMixedFusedLayerNorm
# `apex.contrib.layer_norm.FastLayerNorm` is an optional CUDA extension; record
# its availability so a fallback class can be defined below when it is missing.
try:
    from apex.contrib.layer_norm import FastLayerNorm as OrigFastLayerNorm
except ImportError:
    HAS_FAST_LAYER_NORM = False
else:
    HAS_FAST_LAYER_NORM = True
__all__ = [
"FusedLayerNorm",
"FastLayerNorm",
"MixedFusedLayerNorm",
]
def _set_sequence_parallel_enabled(
param: torch.Tensor,
sequence_parallel_enabled: bool,
) -> None:
setattr(param, "sequence_parallel_enabled", sequence_parallel_enabled)
class FusedLayerNorm(OrigFusedLayerNorm):
    """`apex.normalization.FusedLayerNorm` whose affine parameters carry a
    `sequence_parallel_enabled` flag for Megatron-style grad reductions."""
    def __init__(
        self,
        normalized_shape,
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        *,
        sequence_parallel_enabled: bool = False,
    ):
        super().__init__(
            normalized_shape=normalized_shape,
            eps=eps,
            elementwise_affine=elementwise_affine,
        )
        self.sequence_parallel_enabled = sequence_parallel_enabled
        if self.elementwise_affine:
            # Tag both affine parameters so grad handling can identify them.
            for affine_param in (self.weight, self.bias):
                _set_sequence_parallel_enabled(affine_param, self.sequence_parallel_enabled)
# note: MixedFusedLayerNorm is no different from FusedLayerNorm if it's used in `torch.cuda.amp`.
class MixedFusedLayerNorm(OrigMixedFusedLayerNorm):
    """`apex.normalization.MixedFusedLayerNorm` whose parameters carry a
    `sequence_parallel_enabled` flag."""
    def __init__(
        self,
        normalized_shape,
        eps: float = 1e-5,
        **kwargs,
    ) -> None:
        # NOTE(review): the flag is read with `get` (not `pop`), so it is also
        # forwarded to the base class via **kwargs — confirm the base __init__
        # tolerates the extra keyword.
        self.sequence_parallel_enabled = kwargs.get("sequence_parallel_enabled", False)
        super().__init__(normalized_shape=normalized_shape, eps=eps, **kwargs)
        if self.sequence_parallel_enabled:
            _set_sequence_parallel_enabled(self.weight, self.sequence_parallel_enabled)
            _set_sequence_parallel_enabled(self.bias, self.sequence_parallel_enabled)
if HAS_FAST_LAYER_NORM:
    class FastLayerNorm(OrigFastLayerNorm):
        """`apex.contrib.layer_norm.FastLayerNorm` whose parameters carry a
        `sequence_parallel_enabled` flag."""
        def __init__(
            self,
            hidden_size,
            eps: float = 1e-5,
            *,
            sequence_parallel_enabled: bool = False,
        ):
            super().__init__(
                hidden_size=hidden_size,
                eps=eps
            )
            self.sequence_parallel_enabled = sequence_parallel_enabled
            _set_sequence_parallel_enabled(self.weight, self.sequence_parallel_enabled)
            _set_sequence_parallel_enabled(self.bias, self.sequence_parallel_enabled)
else:
    class FastLayerNorm(FusedLayerNorm):
        """Fallback used when the fast_layer_norm CUDA extension is not built;
        delegates to this module's FusedLayerNorm with the same interface."""
        def __init__(
            self,
            hidden_size,
            eps: float = 1e-5,
            *,
            sequence_parallel_enabled: bool = False,
        ):
            warnings.warn("`apex.contrib.layer_norm.FastLayerNorm` isn't available thus falling back to `apex.normalization.FusedLayerNorm`")
            super().__init__(
                normalized_shape=hidden_size,
                eps=eps,
                elementwise_affine=True,
                sequence_parallel_enabled=sequence_parallel_enabled,
            )
|
apex-master
|
apex/transformer/layers/layer_norm.py
|
import time
import torch
class _Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, "timer has already been started"
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, "timer is not started"
torch.cuda.synchronize()
self.elapsed_ += time.time() - self.start_time
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
# If the timing in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
class _Timers:
"""Group of timers."""
def __init__(self):
self.timers = {}
def __call__(self, name):
if name not in self.timers:
self.timers[name] = _Timer(name)
return self.timers[name]
def write(self, names, writer, iteration, normalizer=1.0, reset=False):
"""Write timers to a tensorboard writer"""
# currently when using add_scalars,
# torch.utils.add_scalars makes each timer its own run, which
# polutes the runs list, so we just add each as a scalar
assert normalizer > 0.0
for name in names:
value = self.timers[name].elapsed(reset=reset) / normalizer
writer.add_scalar(name + "-time", value, iteration)
def log(self, names, normalizer=1.0, reset=True):
"""Log a group of timers."""
assert normalizer > 0.0
string = "time (ms)"
for name in names:
elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
string += " | {}: {:.2f}".format(name, elapsed_time)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1):
print(string, flush=True)
else:
print(string, flush=True)
|
apex-master
|
apex/transformer/pipeline_parallel/_timers.py
|
from apex.transformer.pipeline_parallel.schedules import get_forward_backward_func
from apex.transformer.pipeline_parallel.schedules.common import build_model
__all__ = [
"get_forward_backward_func",
"build_model",
]
|
apex-master
|
apex/transformer/pipeline_parallel/__init__.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for pipeline model parallel."""
from typing import Optional, List, Union, Tuple
import torch
from torch.nn.parallel import DistributedDataParallel
from apex.multi_tensor_apply import multi_tensor_applier
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.microbatches import build_num_microbatches_calculator
from apex.transformer.pipeline_parallel._timers import _Timers
# `amp_C` provides fused multi-tensor CUDA kernels (used by calc_params_l2_norm);
# it is only importable when apex was built with its CUDA extensions.
if multi_tensor_applier.available:
    import amp_C

# Process-wide singletons, initialized lazily by the setup helpers below.
_GLOBAL_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_AUTORESUME = None
_GLOBAL_TIMERS = None

# A tensor shape: either a plain list of ints or a `torch.Size`.
Shape = Union[List[int], torch.Size]
def listify_model(model: Union[torch.nn.Module, List[torch.nn.Module]]) -> List[torch.nn.Module]:
    """Return *model* unchanged if it is already a list, else wrap it in one."""
    return model if isinstance(model, list) else [model]
def _ensure_var_is_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is not None, "{} is not initialized.".format(name)
def _ensure_var_is_not_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is None, "{} is already initialized.".format(name)
def setup_microbatch_calculator(
    rank: int,
    rampup_batch_size: Optional[List[int]],
    global_batch_size: int,
    micro_batch_size: int,
    data_parallel_size: int,
) -> None:
    """Initialize the global number-of-microbatches calculator.

    May only be called once per process; a second call trips the assertion
    inside `_ensure_var_is_not_initialized`.
    """
    global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
    _ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR, 'num microbatches calculator')
    _GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
        rank, rampup_batch_size, global_batch_size, micro_batch_size, data_parallel_size)
def _reconfigure_microbatch_calculator(
    rank: int,
    rampup_batch_size: Optional[List[int]],
    global_batch_size: int,
    micro_batch_size: int,
    data_parallel_size: int,
) -> None:
    """Unconditionally replace the global microbatch calculator.

    Unlike `setup_microbatch_calculator` this skips the already-initialized
    check; the warning below marks it as intended for unit tests only.
    """
    if torch.distributed.get_rank() == 0:
        import warnings
        warnings.warn("This function is only for unittest")
    global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
    _GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
        rank, rampup_batch_size, global_batch_size, micro_batch_size, data_parallel_size)
def get_micro_batch_size():
    """Return the micro batch size from the global calculator."""
    return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.micro_batch_size
def get_num_microbatches():
    """Return the current number of microbatches from the global calculator."""
    return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
    """Return the current global batch size from the global calculator."""
    return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
    """Advance the global calculator given the samples consumed so far."""
    _GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples, consistency_check)
# note (mkozuki): Comment out in favor of `get_kth_microbatch`
def _split_batch_into_microbatch(
batch: List[torch.Tensor],
*,
_micro_batch_size: Optional[int] = None,
_global_batch_size: Optional[int] = None,
) -> List[List[torch.Tensor]]:
micro_batch_size = _micro_batch_size
global_batch_size = _global_batch_size
if micro_batch_size is None:
micro_batch_size = get_micro_batch_size()
if global_batch_size is None:
global_batch_size = get_current_global_batch_size()
for i in range(0, global_batch_size, micro_batch_size):
yield [x[i * micro_batch_size:(i + 1) * micro_batch_size] for x in batch]
# TODO(mkozuki): Support non-tensor local minibatches?
def get_kth_microbatch(batch: Optional[List[torch.Tensor]], k: int) -> List[torch.Tensor]:
    """Create a list of microbatches from a list of local minibatches.

    This function creates a list of `k`th microbatches from a list of local minibatches.
    `a local minibatch` consists of `global_batch_size / data_parallel_size` samples.

    Non-sequence inputs (including None) are returned unchanged.
    """
    # Fix: isinstance checks must use the builtin types. `typing.List` /
    # `typing.Tuple` in isinstance is deprecated and unreliable across
    # Python versions; (list, tuple) is the supported spelling.
    if batch is None or not isinstance(batch, (list, tuple)):
        return batch
    micro_batch_size = get_micro_batch_size()
    start = k * micro_batch_size
    end = start + micro_batch_size
    microbatch = []
    for x in batch:
        size = x.size(0)
        # The requested microbatch must lie fully inside every tensor.
        assert size > start and size >= end
        microbatch.append(x[start:end])
    assert len(microbatch) > 0
    return microbatch
def get_autoresume():
    """Return the global autoresume object (None unless set elsewhere)."""
    return _GLOBAL_AUTORESUME
def _set_timers():
    """Initialize timers."""
    global _GLOBAL_TIMERS
    # Guard against double initialization.
    _ensure_var_is_not_initialized(_GLOBAL_TIMERS, "timers")
    _GLOBAL_TIMERS = _Timers()
def get_timers():
    """Return timers; asserts `_set_timers` was called first."""
    _ensure_var_is_initialized(_GLOBAL_TIMERS, "timers")
    return _GLOBAL_TIMERS
def print_rank_0(message: str) -> None:
    """Print *message* on every process when not distributed, else only on rank 0."""
    if not torch.distributed.is_initialized():
        print(message, flush=True)
        return
    if torch.distributed.get_rank() == 0:
        print(message, flush=True)
def is_last_rank():
    """Return True iff this process is the last rank.

    NOTE(review): presumably requires `torch.distributed` to be initialized —
    `get_rank`/`get_world_size` are called unguarded.
    """
    return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1)
def print_rank_last(message):
    """Print *message* on every process when not distributed, else only on the last rank."""
    if not torch.distributed.is_initialized():
        print(message, flush=True)
        return
    if is_last_rank():
        print(message, flush=True)
def param_is_not_shared(param: torch.nn.Parameter) -> bool:
    """Return True iff *param* is NOT marked as shared.

    BUGFIX: the previous body returned ``getattr(param, "shared", False)``
    directly, i.e. True exactly when the parameter WAS shared — the opposite
    of the name, and wrong for its use in `calc_params_l2_norm` (which wants
    to count each shared parameter once). This matches Megatron-LM's
    ``not hasattr(param, 'shared') or not param.shared``.
    """
    return not getattr(param, "shared", False)
def unwrap_model(model, module_instances=(DistributedDataParallel,)):
    """Strip wrapper modules (e.g. DDP) off a model or a list of models.

    Repeatedly follows ``.module`` while the object is an instance of one of
    *module_instances*. Returns a list iff the input was a list.
    """
    models = model if isinstance(model, list) else [model]
    unwrapped = []
    for m in models:
        while isinstance(m, module_instances):
            m = m.module
        unwrapped.append(m)
    return unwrapped if isinstance(model, list) else unwrapped[0]
def get_model_type(
    model: torch.nn.Module,
) -> ModelType:
    """Get `model_type` of `model`.

    If ``model`` doesn't have ``model_type`` attribute, return ``ModelType.encoder_or_decoder``.

    Args:
        model: possibly wrapped (e.g. in DDP) module.
    """
    # Unwrap container modules first so the attribute is read off the bare model.
    return getattr(unwrap_model(model), "model_type", ModelType.encoder_or_decoder)
def calc_params_l2_norm(model: torch.nn.Module, bf16: bool):
    """Calculate l2 norm of parameters across all model-parallel ranks."""
    # args = get_args()
    if not isinstance(model, list):
        model = [model]
    # Remove duplicate params: count a value only if it is neither a shared
    # parameter nor a tensor-parallel replica.
    params_data = []
    for model_ in model:
        for param in model_.parameters():
            is_not_shared = param_is_not_shared(param)
            is_not_tp_duplicate = parallel_state.param_is_not_tensor_parallel_duplicate(param)
            if is_not_shared and is_not_tp_duplicate:
                if bf16:
                    # Upcast bf16 params — presumably because the fused amp_C
                    # kernel doesn't handle bf16 inputs. TODO(review): confirm.
                    params_data.append(param.data.float())
                else:
                    params_data.append(param.data)
    # Calculate norm with the fused multi-tensor CUDA kernel.
    dummy_overflow_buf = torch.cuda.IntTensor([0])
    norm, _ = multi_tensor_applier(
        amp_C.multi_tensor_l2norm, dummy_overflow_buf, [params_data], False  # no per-parameter norm
    )
    norm_2 = norm * norm
    # Sum across all model-parallel GPUs.
    torch.distributed.all_reduce(
        norm_2, op=torch.distributed.ReduceOp.SUM, group=parallel_state.get_model_parallel_group()
    )
    return norm_2.item() ** 0.5
def average_losses_across_data_parallel_group(losses):
    """Reduce a tensor of losses across all GPUs.

    Each loss is detached, all-reduced (sum) over the data-parallel group,
    then divided by the group's world size to get the mean.
    """
    averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
    torch.distributed.all_reduce(averaged_losses, group=parallel_state.get_data_parallel_group())
    averaged_losses = averaged_losses / torch.distributed.get_world_size(
        group=parallel_state.get_data_parallel_group()
    )
    return averaged_losses
def report_memory(name):
    """Simple GPU memory report, printed only on data-parallel rank 0."""
    mega_bytes = 1024.0 * 1024.0
    string = name + " memory (MB)"
    string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes)
    string += " | max allocated: {}".format(torch.cuda.max_memory_allocated() / mega_bytes)
    string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes)
    string += " | max reserved: {}".format(torch.cuda.max_memory_reserved() / mega_bytes)
    if parallel_state.get_data_parallel_rank() == 0:
        print("[Rank {}] {}".format(torch.distributed.get_rank(), string), flush=True)
def print_params_min_max_norm(optimizer, iteration):
    """Print min, max, and norm of all parameters.

    NOTE(review): reads `param.tensor_model_parallel` unguarded — presumably
    set on every param by the tensor-parallel layers; verify for non-TP params.
    """
    index = 0
    rank = torch.distributed.get_rank()
    string = "iteration, rank, index, tensor-model-parallel, min, max, norm\n"
    # `optimizer` is expected to be a wrapper holding the real optimizer.
    optimizer_ = optimizer.optimizer
    for param_group in optimizer_.param_groups:
        for param in param_group["params"]:
            # 1-based row index across all param groups.
            index += 1
            min_ = param.data.min()
            max_ = param.data.max()
            norm = torch.linalg.norm(param.data)
            string += "{:7d}, {:4d}, {:4d}, {:2d}, ".format(
                iteration, rank, index, int(param.tensor_model_parallel)
            )
            string += "{:.6E}, {:.6E}, {:.6E}\n".format(min_, max_, norm)
    print(string, flush=True)
# NOTE (mkozuki): APEX doesn't have anything equivalent for
# `_GLOBAL_ADLR_AUTORESUME` like Megatron-LM.
# def check_adlr_autoresume_termination(iteration, model, optimizer, lr_scheduler, save: bool):
# """Check for autoresume signal and exit if it is received."""
# from apex.ppu.checkpointing import save_checkpoint
#
# autoresume = get_adlr_autoresume()
# # Add barrier to ensure consistency.
# torch.distributed.barrier()
# if autoresume.termination_requested():
# if save:
# save_checkpoint(iteration, model, optimizer, lr_scheduler)
# print_rank_0(">>> autoresume termination request found!")
# if torch.distributed.get_rank() == 0:
# autoresume.request_resume()
# print_rank_0(">>> training terminated. Returning")
# sys.exit(0)
def get_ltor_masks_and_position_ids(
data, eod_token, reset_position_ids, reset_attention_mask, eod_mask_loss
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = torch.tril(
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
).view(att_mask_batch, 1, seq_length, seq_length)
# Loss mask.
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
if eod_mask_loss:
loss_mask[data == eod_token] = 0.0
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
# We need to clone as the ids will be modifed based on batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
# Find indecies where EOD token is.
eod_index = position_ids[b, data[b] == eod_token]
# Detach indecies from positions if going to modify positions.
if reset_position_ids:
eod_index = eod_index.clone()
# Loop through EOD indecies:
prev_index = 0
for j in range(eod_index.size()[0]):
i = eod_index[j]
# Mask attention loss.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
|
apex-master
|
apex/transformer/pipeline_parallel/utils.py
|
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(mkozuki): Consider removing `timers`.
from functools import reduce
import operator
from typing import Union, Optional, Tuple
import torch
from apex.transformer import parallel_state
from apex.transformer.log_util import get_transformer_logger
from apex.transformer.utils import split_tensor_into_1d_equal_chunks
from apex.transformer.utils import gather_split_1d_tensor
from apex.transformer.pipeline_parallel.utils import Shape
from apex.transformer.pipeline_parallel._timers import _Timers
_logger = get_transformer_logger(__name__)
class FutureTensor:
    """A tensor paired with a wait callback for asynchronous p2p receives."""

    def __init__(self, tensor: torch.Tensor, waitfunc):
        self.tensor = tensor
        self.waitfunc = waitfunc

    def get(self):
        """Run the wait callback once (if any), then return the tensor."""
        if self.waitfunc is not None:
            result = self.waitfunc()
            # The callback may return the gathered tensor (scatter/gather path);
            # adopt it when it does.
            if isinstance(result, torch.Tensor):
                self.tensor = result
            self.waitfunc = None
        return self.tensor
def _run_p2pops(
    tensor_send_prev: Union[torch.Tensor, None],
    tensor_send_next: Union[torch.Tensor, None],
    tensor_recv_prev: Union[torch.Tensor, None],
    tensor_recv_next: Union[torch.Tensor, None],
    async_comm: bool = False,
    overlap_p2p_comm: bool = False,
    batch_p2p_comm: bool = True,
):
    """Issue up to four p2p send/recv ops against the pipeline-parallel group.

    A ``None`` tensor skips the corresponding direction. Returns a 5-tuple
    ``(send_prev_req, recv_prev_req, send_next_req, recv_next_req, wait_handles)``:
    the four request slots are populated only for ``async_comm``, and
    ``wait_handles`` only for ``overlap_p2p_comm``; in the default blocking
    mode all five are ``None`` and communication has completed on return.
    """
    p2p_group = parallel_state.get_pipeline_model_parallel_group()
    default_group = parallel_state.get_model_parallel_group()
    # A device sync is inserted before communicating when the pipeline group
    # is a different communicator than the default model-parallel group.
    need_to_sync = p2p_group.name() != default_group.name()
    reqs = []
    ops = []
    if batch_p2p_comm and p2p_group.name() == "nccl":
        # Batched NCCL path: collect P2POp descriptors, launch them together.
        if tensor_send_prev is not None:
            send_prev_op = torch.distributed.P2POp(
                op=torch.distributed.isend,
                tensor=tensor_send_prev,
                peer=parallel_state.get_pipeline_model_parallel_prev_rank(),
                group=p2p_group,
            )
            ops.append(send_prev_op)
        if tensor_recv_prev is not None:
            recv_prev_op = torch.distributed.P2POp(
                op=torch.distributed.irecv,
                tensor=tensor_recv_prev,
                peer=parallel_state.get_pipeline_model_parallel_prev_rank(),
                group=p2p_group,
            )
            ops.append(recv_prev_op)
        if tensor_send_next is not None:
            send_next_op = torch.distributed.P2POp(
                op=torch.distributed.isend,
                tensor=tensor_send_next,
                peer=parallel_state.get_pipeline_model_parallel_next_rank(),
                group=p2p_group,
            )
            ops.append(send_next_op)
        if tensor_recv_next is not None:
            recv_next_op = torch.distributed.P2POp(
                op=torch.distributed.irecv,
                tensor=tensor_recv_next,
                peer=parallel_state.get_pipeline_model_parallel_next_rank(),
                group=p2p_group,
            )
            ops.append(recv_next_op)
        if len(ops) > 0:
            # sync before communication if needed
            if need_to_sync:
                torch.cuda.synchronize()
            reqs = torch.distributed.batch_isend_irecv(ops)
    else:
        # Individual-op path: issue each isend/irecv separately.
        # sync before communication if needed
        if need_to_sync and any([
            tensor_send_prev is not None, tensor_recv_prev is not None,
            tensor_send_next is not None, tensor_recv_next is not None]):
            torch.cuda.synchronize()
        if tensor_send_prev is not None:
            send_prev_req = torch.distributed.isend(
                tensor=tensor_send_prev,
                dst=parallel_state.get_pipeline_model_parallel_prev_rank(),
                group=p2p_group,
            )
            reqs.append(send_prev_req)
        if tensor_recv_prev is not None:
            recv_prev_req = torch.distributed.irecv(
                tensor=tensor_recv_prev,
                src=parallel_state.get_pipeline_model_parallel_prev_rank(),
                group=p2p_group,
            )
            reqs.append(recv_prev_req)
        if tensor_send_next is not None:
            send_next_req = torch.distributed.isend(
                tensor=tensor_send_next,
                dst=parallel_state.get_pipeline_model_parallel_next_rank(),
                group=p2p_group,
            )
            reqs.append(send_next_req)
        if tensor_recv_next is not None:
            # (note: reuses the `recv_next_op` name here for a request handle)
            recv_next_op = torch.distributed.irecv(
                tensor=tensor_recv_next,
                src=parallel_state.get_pipeline_model_parallel_next_rank(),
                group=p2p_group,
            )
            reqs.append(recv_next_op)
    if len(reqs) > 0:
        if overlap_p2p_comm:
            # Hand raw wait handles to the scheduler; completion is overlapped
            # with computation by the caller.
            return (None, None, None, None, reqs)
        if async_comm:
            # batch_isend_irecv may return one request per op, or a single
            # request covering all ops; map requests back to directions.
            if len(ops) == 0 or len(reqs) == len(ops):
                tensor_send_prev_req = None if tensor_send_prev is None else reqs.pop(0)
                tensor_recv_prev_req = None if tensor_recv_prev is None else reqs.pop(0)
                tensor_send_next_req = None if tensor_send_next is None else reqs.pop(0)
                tensor_recv_next_req = None if tensor_recv_next is None else reqs.pop(0)
            elif len(reqs) == 1:
                # One shared request: every direction waits on the same handle.
                tensor_send_prev_req = None if tensor_send_prev is None else reqs[0]
                tensor_recv_prev_req = None if tensor_recv_prev is None else reqs[0]
                tensor_send_next_req = None if tensor_send_next is None else reqs[0]
                tensor_recv_next_req = None if tensor_recv_next is None else reqs[0]
            else:
                assert False, "failed to manage p2p requests and handles"
            return (tensor_send_prev_req, tensor_recv_prev_req, tensor_send_next_req, tensor_recv_next_req, None)
        else:
            # Blocking mode: wait for everything before returning.
            for req in reqs:
                req.wait()
            return (None, None, None, None, None)
    return (None, None, None, None, None)
# TODO(mkozuki): Check if it's possible to sunset `override_scatter_gather_tensors_in_pipeline`.
# TODO(mkozuki): Think about if it's possible to push some logic and arguments e.g.
# `scatter_gather_tensors_in_pipeline`, `sequence_parallel_enabled`, and
# `override_scatter_gather_tensors_in_pipeline` # to the user of
# apex.transformer forward_backwardfunctions.
def _communicate(
    tensor_send_next: Optional[torch.Tensor],
    tensor_send_prev: Optional[torch.Tensor],
    recv_prev: bool,
    recv_next: bool,
    tensor_shape: Optional[Shape] = None,
    override_scatter_gather_tensors_in_pipeline: bool = False,
    dtype_: Optional[torch.dtype] = None,
    *,
    scatter_gather_tensors_in_pipeline: bool = True,
    params_dtype: Optional[torch.dtype] = None,
    fp32_residual_connection: bool = False,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    overlap_p2p_comm: bool = False,
    batch_p2p_comm: bool = True,
) -> Tuple[Union[torch.Tensor, FutureTensor, None], Union[torch.Tensor, FutureTensor, None]]:
    """Base function for communication of tensors between stages.

    .. note::
        Reference https://gitlab-master.nvidia.com/ADLR/megatron-lm/-/blob/cfd2e2160700b7f2c1bf35298ac14bc341f4c759/megatron/p2p_communication.py#L24-L159

    NOTE(review): despite the 2-tuple annotation above, three values are
    returned; the third is the `wait_handles` list (non-None only when
    `overlap_p2p_comm` is set).

    dtype logic: If none of ``dtype_``, ``params_dtype``, ``fp32_residual_connection`` is specified,
    torch.float32 is used.
    See https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/arguments.py#L145-L159
    for the details of arguments of ``dtype_``, ``params_dtype``, ``fp32_residual_connection``.

    Args:
        tensor_send_next: tensor to send to next rank (no tensor sent if set to None).
        tensor_send_prev: tensor to send to prev rank (no tensor sent if set to None).
        recv_prev: boolean for whether tensor should be received from previous rank.
        recv_next: boolean for whether tensor should be received from next rank.
        tensor_shape: optional, use when the input sequence contains less tokens than the default sequence length
        override_scatter_gather_tensors_in_pipeline:
            optional, this is used when tensor_shape is provided to override scatter gather tensors
        dtype_: This is used when tensor_shape is provided and what is the type of tensor_shape

    Keyword args:
        scatter_gather_tensors_in_pipeline: Optional. If :obj:`True`, use scatter/gather to optimize communication of tensors.
        params_dtype: Optional and legacy. Defaults to torch.float. If you manually call `.half()` or `.bfloat16()` on
            your model deliberately, pass this argument.
        fp32_residual_connection: Optional. If :obj:`True`, move residual connections to fp32.
        sequence_parallel_enabled: Set to :obj:`True` if sequence parallel is enabled.
            This argument is here for consistency with Megatron-LM.
            This argument has an effect on the communication optimization, not on tensor_shape update.
        sync_batch_comm: If :obj:`False`, disable cuda synchronization after the batched communication.
            To disable, https://github.com/pytorch/pytorch/pull/82450 would be required.
        overlap_p2p_comm: If :obj:`True`, returns cuda wait handles to scheduler instead of completing
            the communication within the p2p transfer API instance. The scheduler manages the communication completion
            to overlap with computation.
        batch_p2p_comm: If :obj:`True`, use the batched send and receive api to conduct the communication of
            a collection of send and receive operations between peer. If :obj:`False`, conduct each send and recv operation
            individually.

    Returns:
        tuple containing

        - tensor_recv_prev: `torch.Tensor` if `recv_prev` is :obj:`True`, `None` otherwise.
        - tensor_recv_next: `torch.Tensor` if `recv_next` is :obj:`True`, `None` otherwise.
    """
    if async_comm and sequence_parallel_enabled:
        import warnings  # NOQA

        class ExperimentalWarning(UserWarning): pass  # NOQA

        warnings.warn(
            "The combination of `async_comm` and `sequence_parallel_enabled` is not well tested.",
            ExperimentalWarning,
        )
    # Create placeholder tensors for receive in forward and backward directions if needed.
    tensor_recv_prev = None
    tensor_recv_next = None
    if tensor_shape is None:
        # In megatron, `tensor_shape` is set to `(args.seq_length, args.micro_batch_size, args.hidden_size)`
        raise RuntimeError(
            "`tensor_shape` must be specified. Common `tensor_shape` is `(seq_length, micro_batch_size, hidden_size)`")
    tensor_parallel_size = parallel_state.get_tensor_model_parallel_world_size()
    override_scatter_gather_tensors_in_pipeline_ = False
    # TODO(mkozuki): Demystify hardcode False of `scatter_gather_tensors_in_pipeline` and add a testcase if possible.
    # NOTE(mkozuki): This is super strange and doesn't make sense to me. I have no idea what is happening here.
    # However, I can say that this hardcoding override is necessary for sequence parallel in nemo megatron to work.
    # I've not managed to reproduce the hang using standalone GPT with sequence parallel.
    # The hang in NeMo Megatron happens in the 3rd iteration, the last iteration of stead phase inside
    # forward_backward_pipelining_without_interleaving, pipeline parallel rank of 0 (tensor model parallel world
    # size of 2 and pipeline model parallel world size of 2). The commit then of APEX and NeMo were
    # https://github.com/NVIDIA/apex/pull/1396/commits/3060c98dd8ba42abf7702ea9d2cff0f39ea74f45 and
    # https://github.com/NVIDIA/NeMo/pull/4232/commits/1cb32dfca2ab9b20f53ebdb84476c34cb42f0205.
    # The PyTorch version was 1.13.0a0+git2d354cd, for what is worth.
    # Currently, indiscriminately this is set to `False`, which can lead to an unexpected performance regression
    # for non sequence parallel case.
    scatter_gather_tensors_in_pipeline = False
    if scatter_gather_tensors_in_pipeline and not sequence_parallel_enabled:
        # Communicate a flat 1/tensor_parallel_size chunk when the element
        # count divides evenly; otherwise fall back to the full shape.
        tensor_chunk_size = int(reduce(operator.mul, tensor_shape, 1))
        if tensor_chunk_size % tensor_parallel_size == 0:
            tensor_chunk_shape = [tensor_chunk_size // tensor_parallel_size]
        else:
            tensor_chunk_shape = tensor_shape
            override_scatter_gather_tensors_in_pipeline_ = True
    else:
        tensor_chunk_shape = tensor_shape
    # The dtype logic below is copied from NVIDIA/Megatron-LM repo:
    # https://github.com/NVIDIA/Megatron-LM/blob/d41696840ed0a7edb7e0499eb82a48ae112d9bb3/megatron/p2p_communication.py#L74-L81
    dtype = params_dtype or torch.float
    if fp32_residual_connection:
        dtype = torch.float
    requires_grad = True
    if dtype_ is not None:
        dtype = dtype_
        # TODO(mkozuki): Figure out why this logic of requires_grad isn't working
        # when sequence_parallel_enabled=True. Otherwise, `x.retain_grad()` of
        # https://github.com/crcrpar/apex/blob/069832078a652b4bd8a99db84faf953a81415ab3/apex/transformer/pipeline_parallel/schedules/common.py#L360
        # fails.
        # requires_grad = False
    # Allocate receive buffers on the current CUDA device.
    if recv_prev:
        tensor_recv_prev = torch.empty(
            tensor_chunk_shape,
            requires_grad=requires_grad,
            device=torch.cuda.current_device(),
            dtype=dtype,
        )
    if recv_next:
        tensor_recv_next = torch.empty(
            tensor_chunk_shape,
            requires_grad=requires_grad,
            device=torch.cuda.current_device(),
            dtype=dtype,
        )
    # Split tensor into smaller chunks if using scatter-gather optimization.
    scatter_gather_optimization_doable = (
        not override_scatter_gather_tensors_in_pipeline_
        and scatter_gather_tensors_in_pipeline
        and not sequence_parallel_enabled
    )
    if scatter_gather_optimization_doable:
        if tensor_send_next is not None:
            tensor_send_next = split_tensor_into_1d_equal_chunks(tensor_send_next)
        if tensor_send_prev is not None:
            tensor_send_prev = split_tensor_into_1d_equal_chunks(tensor_send_prev)
    # Send tensors in both the forward and backward directions as appropriate.
    tensor_send_prev_req, tensor_recv_prev_req, tensor_send_next_req, tensor_recv_next_req, wait_handles = _run_p2pops(
        tensor_send_prev, tensor_send_next, tensor_recv_prev, tensor_recv_next, async_comm, overlap_p2p_comm, batch_p2p_comm)
    if async_comm:
        tensor_recv_prev_waitfunc = None
        tensor_recv_next_waitfunc = None
        # TODO: investigate whether this is necessary for correctness (ref: https://github.com/pytorch/pytorch/issues/38642)
        # see also: sync added for async_comm callbacks below in gather_recv_prev_wait and gather_recv_next_wait
        if tensor_recv_prev_req is not None:
            def tensor_recv_prev_wait():
                tensor_recv_prev_req.wait()
                torch.cuda.synchronize()
            tensor_recv_prev_waitfunc = tensor_recv_prev_wait
        if tensor_recv_next_req is not None:
            def tensor_recv_next_wait():
                tensor_recv_next_req.wait()
                torch.cuda.synchronize()
            tensor_recv_next_waitfunc = tensor_recv_next_wait
    else:
        if sync_batch_comm:
            # To protect against race condition when using batch_isend_irecv().
            torch.cuda.synchronize()
    # If using scatter-gather optimization, gather smaller chunks.
    if scatter_gather_optimization_doable:
        if not async_comm:
            if recv_prev:
                tensor_recv_prev = (
                    gather_split_1d_tensor(tensor_recv_prev)
                    .view(tensor_shape)
                    .requires_grad_()
                )
            if recv_next:
                tensor_recv_next = (
                    gather_split_1d_tensor(tensor_recv_next)
                    .view(tensor_shape)
                    .requires_grad_()
                )
        else:
            # Async path: defer the gather into the wait callbacks.
            def gather_recv_prev_wait():
                tensor_recv_prev_req.wait()
                # From @Deepak's PR https://github.com/NVIDIA/Megatron-LM/commit/27fc468964064eeb33b703c9a0b2af938d80dd14
                # A sync seems to be needed before gather otherwise losses jump around e.g., in run_gpt_minimal_test
                torch.cuda.synchronize()
                return (
                    gather_split_1d_tensor(tensor_recv_prev)
                    .view(tensor_shape)
                    .requires_grad_()
                )
            def gather_recv_next_wait():
                tensor_recv_next_req.wait()
                torch.cuda.synchronize()
                return (
                    gather_split_1d_tensor(tensor_recv_next)
                    .view(tensor_shape)
                    .requires_grad_()
                )
            tensor_recv_prev_waitfunc = gather_recv_prev_wait
            tensor_recv_next_waitfunc = gather_recv_next_wait
    if async_comm:
        # Wrap receive buffers so callers can defer the wait.
        future_tensor_recv_prev = None
        future_tensor_recv_next = None
        if tensor_recv_prev is not None:
            future_tensor_recv_prev = FutureTensor(tensor_recv_prev, tensor_recv_prev_waitfunc)
        if tensor_recv_next is not None:
            future_tensor_recv_next = FutureTensor(tensor_recv_next, tensor_recv_next_waitfunc)
        return future_tensor_recv_prev, future_tensor_recv_next, None
    return tensor_recv_prev, tensor_recv_next, wait_handles
def recv_forward(
    tensor_shape: Shape,
    override_scatter_gather_tensors_in_pipeline: bool = False,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
    """Receive the activation tensor from the previous pipeline stage.

    The first stage has no predecessor and returns ``None``.
    """
    if parallel_state.is_pipeline_first_stage():
        return None
    received, _, _ = _communicate(
        tensor_send_next=None,
        tensor_send_prev=None,
        recv_prev=True,
        recv_next=False,
        tensor_shape=tensor_shape,
        override_scatter_gather_tensors_in_pipeline=override_scatter_gather_tensors_in_pipeline,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    return received
def recv_backward(
    tensor_shape: Shape = None,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
    """Receive the gradient tensor from the next pipeline stage.

    The last stage has no successor and returns ``None``.
    """
    if parallel_state.is_pipeline_last_stage():
        return None
    _, grad_received, _ = _communicate(
        tensor_send_next=None,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=True,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    return grad_received
def send_forward(
    output_tensor: torch.Tensor,
    override_scatter_gather_tensors_in_pipeline: bool = False,
    tensor_shape: Shape = None,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> None:
    """Send the activation tensor to the next pipeline stage.

    The last stage has no successor; this is a no-op there.
    """
    if parallel_state.is_pipeline_last_stage():
        return
    _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=False,
        override_scatter_gather_tensors_in_pipeline=override_scatter_gather_tensors_in_pipeline,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
def send_backward(
    input_tensor_grad: torch.Tensor,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> None:
    """Send ``input_tensor_grad`` to the previous pipeline stage.

    No-op on the first pipeline stage, which has no predecessor to send to.
    """
    if parallel_state.is_pipeline_first_stage():
        return
    # Pure send: nothing is received, so the result of _communicate is dropped.
    _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=False,
        recv_next=False,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
def send_forward_recv_backward(
    output_tensor: torch.Tensor,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
    """Send ``output_tensor`` to the next stage and receive its gradient back.

    Returns ``None`` on the last pipeline stage, which has no successor.
    """
    if parallel_state.is_pipeline_last_stage():
        return None
    comm_result = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=False,
        recv_next=True,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    # (tensor_from_prev, tensor_from_next, wait_handles): the gradient arrives
    # from the next rank.
    return comm_result[1]
def send_backward_recv_forward(
    input_tensor_grad: torch.Tensor,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor, None]:
    """Send ``input_tensor_grad`` to the previous stage and receive the next
    microbatch's activation from it.

    Returns ``None`` on the first pipeline stage, which has no predecessor.
    """
    if parallel_state.is_pipeline_first_stage():
        return None
    comm_result = _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=True,
        recv_next=False,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    # (tensor_from_prev, tensor_from_next, wait_handles): the activation
    # arrives from the previous rank.
    return comm_result[0]
def send_forward_recv_forward(
    output_tensor: torch.Tensor,
    recv_prev: bool,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    overlap_p2p_comm: bool = False,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor]:
    """Send ``output_tensor`` to the next stage while optionally receiving the
    next microbatch's activation from the previous stage.

    When ``overlap_p2p_comm`` is true, the communication wait handles are
    returned alongside the received tensor so the caller can overlap compute
    with communication.
    """
    input_tensor, _, wait_handles = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=None,
        recv_prev=recv_prev,
        recv_next=False,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        overlap_p2p_comm=overlap_p2p_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    return (input_tensor, wait_handles) if overlap_p2p_comm else input_tensor
def send_backward_recv_backward(
    input_tensor_grad: torch.Tensor,
    recv_next: bool,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    overlap_p2p_comm: bool = False,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Union[torch.Tensor, FutureTensor]:
    """Send ``input_tensor_grad`` to the previous stage while optionally
    receiving an output gradient from the next stage.

    When ``overlap_p2p_comm`` is true, the communication wait handles are
    returned alongside the received tensor so the caller can overlap compute
    with communication.
    """
    _, output_tensor_grad, wait_handles = _communicate(
        tensor_send_next=None,
        tensor_send_prev=input_tensor_grad,
        recv_prev=False,
        recv_next=recv_next,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        overlap_p2p_comm=overlap_p2p_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    return (output_tensor_grad, wait_handles) if overlap_p2p_comm else output_tensor_grad
def send_forward_backward_recv_forward_backward(
    output_tensor: torch.Tensor,
    input_tensor_grad: torch.Tensor,
    recv_prev: bool,
    recv_next: bool,
    tensor_shape: Shape,
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
    overlap_p2p_comm: bool = False,
    batch_p2p_comm: bool = True,
    timers: _Timers = None,
) -> Tuple[Union[torch.Tensor, FutureTensor], Union[torch.Tensor, FutureTensor]]:
    """Exchange tensors with both pipeline neighbors in one batched call:
    send the activation forward and the gradient backward, while optionally
    receiving an activation from the previous stage and a gradient from the
    next.

    When ``overlap_p2p_comm`` is true, the communication wait handles are
    returned as a third element.
    """
    input_tensor, output_tensor_grad, wait_handles = _communicate(
        tensor_send_next=output_tensor,
        tensor_send_prev=input_tensor_grad,
        recv_prev=recv_prev,
        recv_next=recv_next,
        tensor_shape=tensor_shape,
        dtype_=dtype,
        async_comm=async_comm,
        sequence_parallel_enabled=sequence_parallel_enabled,
        sync_batch_comm=sync_batch_comm,
        overlap_p2p_comm=overlap_p2p_comm,
        batch_p2p_comm=batch_p2p_comm,
    )
    if overlap_p2p_comm:
        return input_tensor, output_tensor_grad, wait_handles
    return input_tensor, output_tensor_grad
|
apex-master
|
apex/transformer/pipeline_parallel/p2p_communication.py
|
import contextlib
from typing import Any, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.p2p_communication import FutureTensor
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import free_output_tensor
from apex.transformer.log_util import get_transformer_logger
# Public API of this module.
__all__ = ["forward_backward_pipelining_without_interleaving"]
# Module-level logger shared by the schedule below.
_logger = get_transformer_logger(__name__)
def get_tensor_shapes(
    rank: int,
    model_type: ModelType,
    *,
    tensor_shape: Union[List[int], torch.Size],
    decoder_sequence_length: Optional[int] = None,
    sequence_parallel_enabled: bool = False,
) -> Sequence[Sequence[int]]:
    """Compute the shape(s) of the tensors exchanged with pipeline neighbors.

    Args:
        rank: pipeline parallel rank
        model_type:

    Keyword Args:
        tensor_shape: ``(sequence_length, micro_batch_size, hidden_size)``
        decoder_sequence_length:
        sequence_parallel_enabled:
    """
    # Shape count depends on where `rank` sits relative to the encoder/decoder
    # split:
    # - encoder_and_decoder (T5-style) ranks past the split exchange two
    #   tensors: decoder activations plus the encoder output;
    # - ranks before/at the split, and all other model types, exchange one.
    assert (
        len(tensor_shape) == 3
    ), f"`tensor_shape` should be [sequence_length, micro_batch_size, hidden_size] but {tensor_shape}"
    sequence_length, micro_batch_size, hidden_size = tensor_shape
    # With sequence parallelism the sequence dimension is sharded across the
    # tensor-model-parallel group; otherwise it is left untouched.
    tp_degree = (
        parallel_state.get_tensor_model_parallel_world_size()
        if sequence_parallel_enabled
        else 1
    )
    seq_length = sequence_length // tp_degree
    if model_type != ModelType.encoder_and_decoder:
        return [(seq_length, micro_batch_size, hidden_size)]
    if parallel_state.is_pipeline_stage_before_split(rank):
        return [(seq_length, micro_batch_size, hidden_size)]
    dec_seq_length = decoder_sequence_length // tp_degree
    return [
        (dec_seq_length, micro_batch_size, hidden_size),
        (seq_length, micro_batch_size, hidden_size),
    ]
def recv_forward(
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
    """Receive one activation per entry of ``tensor_shapes`` from the previous
    pipeline stage; ``None`` entries are passed through untouched.
    """
    return [
        None
        if shape is None
        else p2p_communication.recv_forward(
            tensor_shape=shape,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        for shape in tensor_shapes
    ]
def recv_backward(
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
    """Receive one output gradient per entry of ``tensor_shapes`` from the next
    pipeline stage; ``None`` entries are passed through untouched.
    """
    return [
        None
        if shape is None
        else p2p_communication.recv_backward(
            tensor_shape=shape,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        for shape in tensor_shapes
    ]
def send_forward(
    output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> None:
    """Send each output tensor to the next pipeline stage, skipping entries
    whose corresponding shape is ``None``.
    """
    # Accept a bare tensor for the common single-tensor case.
    if not isinstance(output_tensors, list):
        output_tensors = [output_tensors]
    for output_tensor, shape in zip(output_tensors, tensor_shapes):
        if shape is not None:
            p2p_communication.send_forward(
                output_tensor,
                tensor_shape=shape,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
def send_backward(
    input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> None:
    """Send each input gradient to the previous pipeline stage, skipping
    entries whose corresponding shape is ``None``.
    """
    # Accept a bare tensor for the common single-tensor case.
    if not isinstance(input_tensor_grads, list):
        input_tensor_grads = [input_tensor_grads]
    for input_tensor_grad, shape in zip(input_tensor_grads, tensor_shapes):
        if shape is not None:
            p2p_communication.send_backward(
                input_tensor_grad,
                tensor_shape=shape,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
def send_forward_recv_backward(
    output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
    """Send each output tensor forward and collect the matching gradient from
    the next stage; ``None`` shapes yield ``None`` gradients.
    """
    # Accept a bare tensor for the common single-tensor case.
    if not isinstance(output_tensors, list):
        output_tensors = [output_tensors]
    return [
        None
        if shape is None
        else p2p_communication.send_forward_recv_backward(
            output_tensor,
            tensor_shape=shape,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        for output_tensor, shape in zip(output_tensors, tensor_shapes)
    ]
def send_backward_recv_forward(
    input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
    tensor_shapes: List[Union[None, List[int]]],
    *,
    dtype: Optional[torch.dtype] = None,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    sync_batch_comm: bool = True,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
    """Send each input gradient backward and collect the next microbatch's
    activation from the previous stage; ``None`` shapes yield ``None``.
    """
    # Accept a bare tensor for the common single-tensor case.
    if not isinstance(input_tensor_grads, list):
        input_tensor_grads = [input_tensor_grads]
    return [
        None
        if shape is None
        else p2p_communication.send_backward_recv_forward(
            input_tensor_grad,
            tensor_shape=shape,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        for input_tensor_grad, shape in zip(input_tensor_grads, tensor_shapes)
    ]
def forward_backward_pipelining_without_interleaving(
    forward_step_func: FwdStepFunc,
    batch: Optional[Batch],
    model: Union[torch.nn.Module, List[torch.nn.Module]],
    *,
    forward_only: bool,
    tensor_shape: Optional[Union[List[int], torch.Size]] = None,
    decoder_sequence_length: Optional[int] = None,
    dtype: Optional[torch.dtype] = None,
    grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
    disable_autocast: bool = False,
    deallocate_pipeline_outputs: bool = False,
    async_comm: bool = False,
    sequence_parallel_enabled: bool = False,
    custom_sync_context_handler: Optional[Any] = None,
    custom_grad_sync_func: Optional[Any] = None,
    sync_batch_comm: bool = True,
    num_micro_batches_with_partial_activation_checkpoints: Optional[int] = None,
    **kwargs,
) -> List[Union[torch.Tensor, Sequence[torch.Tensor]]]:
    """Run non-interleaved 1F1B schedule, with communication between pipeline stages.
    This pipeline parallel scheduling consists of three steps:
    1. warmup
    2. 1F1B a.k.a. steady state
    3. cooldown if not forward_only
    Args:
        forward_step_func: A function which takes a minibatch and model as its arguments and
            returns model's forward output and the loss function.
            The loss function is supposed to take one `torch.Tensor` and
            return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
        batch: A minibatch, i.e., a list of `torch.Tensor`'s.
        model: A `torch.nn.Module` or a list of `torch.nn.Module`.
    Keyword args:
        forward_only:
        tensor_shape: Shape of tensor. The tensor is expected to be 3D and its order of dimension
            is supposed to be ``(sequence, batch, hidden)``.
        dtype: dtype used in p2p communication. If ``None`` (default value),
            torch.float32 will be used even if ``autocast`` is enabled.
        grad_scaler:
        disable_autocast:
        deallocate_pipeline_outputs: If :obj:`True`, free the data of the output tensor of
            each pipeline stage. Experimental.
        sequence_parallel_enabled: Set to :obj:`True` for this function to handle sequence length.
            When :obj:`True`, the sequence length on each tensor model parallel rank is updated
            to :math:`original\_sequence\_length / tensor\_model\_parallel\_world\_size`.
        custom_sync_context_handler: Does nothing if ``None`` (default
            value). Otherwise, a function to construct a context
            manager that disable asynchronous gradient reductions.
            Asynchronous gradient reductions are only enabled in the
            first pipeline stage, during the last backward pass.
        custom_grad_sync_func: Does nothing if ``None`` (default
            value). Otherwise, a function to perform gradient
            reductions. This is called in all pipeline stages except
            the first, during the bubble overhead.
        sync_batch_comm: If :obj:`False`, disable cuda synchronization after the batched communication.
            To disable, https://github.com/pytorch/pytorch/pull/82450 would be required.
        num_micro_batches_with_partial_activation_checkpoints: If :obj:`int`, set the number of
            micro-batches checkpointing the activation of partial number of Transformer layers.
            The rest of the micro-batch within the window of maximum outstanding micro-batch
            backpropagations would checkpoint all Transformer layers.
    Returns:
        a list of loss `torch.Tensor`s if the last stage, empty list otherwise.
    """
    # timers = get_timers()
    if deallocate_pipeline_outputs:
        warnings.warn(
            "`deallocate_pipeline_outputs` is experimental and subject to change. "
            "This option is not recommended."
        )
    model: List[torch.nn.Module] = listify_model(model)
    if len(model) != 1:
        msg = f"`model` is expected be a `nn.Module`, but {type(model)}"
        raise RuntimeError(msg)
    model: torch.nn.Module = model[0]
    # Disable async grad reductions
    if custom_sync_context_handler is not None:
        sync_context_handler = custom_sync_context_handler
    else:
        sync_context_handler = contextlib.nullcontext
    sync_context = None
    def disable_grad_sync():
        """Disable asynchronous grad reductions"""
        nonlocal sync_context
        if sync_context is None:
            sync_context = sync_context_handler()
            sync_context.__enter__()
    def enable_grad_sync():
        """Enable asynchronous grad reductions"""
        nonlocal sync_context
        if sync_context is not None:
            sync_context.__exit__(None, None, None)
            sync_context = None
    disable_grad_sync()
    # Compute number of warmup microbatches.
    num_microbatches: int = get_num_microbatches()
    num_warmup_microbatches: int = (
        parallel_state.get_pipeline_model_parallel_world_size() - parallel_state.get_pipeline_model_parallel_rank() - 1
    )
    num_warmup_microbatches: int = min(num_warmup_microbatches, num_microbatches)
    num_microbatches_remaining: int = num_microbatches - num_warmup_microbatches
    # Checkpoint the activations of partial Transformer layers in a number of micro-batches
    # within the maximum outstanding micro-batch backpropagations.
    # Micro-batches with the ids less than 'num_micro_batches_with_partial_activation_checkpoints'
    # checkpoint partial Transformer layers (or skip checkpointing) and
    # the rest of micro-batches within a window of micro-batches checkpoint
    # all Transformer layers. The window of micro-batches is set by the maximum
    # outstanding backpropagations and becomes smaller at later pipeline stages.
    # Please refer the appendix C in https://arxiv.org/pdf/2205.05198.pdf
    max_outstanding_backprops = None
    if num_micro_batches_with_partial_activation_checkpoints is not None:
        max_outstanding_backprops = num_warmup_microbatches + 1
    model_type = get_model_type(model)
    rank: int = parallel_state.get_pipeline_model_parallel_rank()
    recv_tensor_shapes: List[List[int]] = get_tensor_shapes(
        rank - 1,
        model_type,
        tensor_shape=tensor_shape,
        decoder_sequence_length=decoder_sequence_length,
        sequence_parallel_enabled=sequence_parallel_enabled,
    )
    send_tensor_shapes: List[List[int]] = get_tensor_shapes(
        rank,
        model_type,
        tensor_shape=tensor_shape,
        decoder_sequence_length=decoder_sequence_length,
        sequence_parallel_enabled=sequence_parallel_enabled,
    )
    _logger.info(
        f"num_microbatches: {num_microbatches}, "
        f"num_warmup_microbatches: {num_warmup_microbatches}, "
        f"num_microbatches_remaining: {num_microbatches_remaining}"
    )
    # Input, output tensors only need to be saved when doing backward passes
    input_tensors: List[Union[None, torch.Tensor]] = []
    output_tensors: List[Union[None, torch.Tensor]] = []
    losses_reduced: List[Union[None, torch.Tensor]] = []
    ###################################################################################################################
    # Run warmup forward passes.
    ###################################################################################################################
    _logger.info("Warmup")
    for i in range(num_warmup_microbatches):
        _logger.debug(f"warmup iter: {i} / {num_warmup_microbatches}")
        _logger.debug("receive fwd")
        # Decide to checkpoint all layers' activations of the current micro-batch
        if max_outstanding_backprops is not None:
            checkpoint_activations_micro_batch = (
                i % max_outstanding_backprops >= num_micro_batches_with_partial_activation_checkpoints
            )
        else:
            checkpoint_activations_micro_batch = None
        input_tensor = recv_forward(
            tensor_shapes=recv_tensor_shapes,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i)
        output_tensor = forward_step(
            forward_step_func,
            cur_microbatch,
            model,
            input_tensor,
            losses_reduced,
            dtype,
            disable_autocast,
            checkpoint_activations_micro_batch,
        )
        _logger.debug("send fwd")
        send_forward(
            output_tensor,
            tensor_shapes=send_tensor_shapes,
            dtype=dtype,
            async_comm=async_comm,
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
        if not forward_only:
            input_tensors.append(input_tensor)
            output_tensors.append(output_tensor)
            free_output_tensor(output_tensor, deallocate_pipeline_outputs)
    # Before running 1F1B, need to receive first forward tensor.
    # If all microbatches are run in warmup / cooldown phase, then no need to
    # receive this tensor here.
    if num_microbatches_remaining > 0:
        _logger.debug("recv_forward before steady state start")
        input_tensor: List[Union[None, torch.Tensor, FutureTensor]] = recv_forward(
            tensor_shapes=recv_tensor_shapes,
            dtype=dtype,
            async_comm=async_comm,
            # Fix: this kwarg was previously omitted here (alone among all the
            # send/recv calls in this schedule). The matching `send_forward` on
            # the previous rank passes `sequence_parallel_enabled`, so this
            # receive must agree for the communicated layout to match.
            sequence_parallel_enabled=sequence_parallel_enabled,
            sync_batch_comm=sync_batch_comm,
        )
    ###################################################################################################################
    # Run 1F1B in steady state.
    ###################################################################################################################
    _logger.info("Steady phase")
    for i in range(num_microbatches_remaining):
        _logger.debug(f"steady iter: {i} / {num_microbatches_remaining}")
        last_iteration: bool = i == (num_microbatches_remaining - 1)
        # Decide to checkpoint all layers' activations of the current micro-batch
        if max_outstanding_backprops is not None:
            checkpoint_activations_micro_batch = (
                ((i+num_warmup_microbatches) % max_outstanding_backprops) >= num_micro_batches_with_partial_activation_checkpoints
            )
        else:
            checkpoint_activations_micro_batch = None
        cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i + num_warmup_microbatches)
        output_tensor: Union[torch.Tensor, Sequence[torch.Tensor]] = forward_step(
            forward_step_func,
            cur_microbatch,
            model,
            input_tensor,
            losses_reduced,
            dtype,
            disable_autocast,
            checkpoint_activations_micro_batch,
        )
        if forward_only:
            _logger.debug("send fwd")
            send_forward(
                output_tensor,
                tensor_shapes=send_tensor_shapes,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
            if not last_iteration:
                # Fix: log message previously read "(last iteration)" although
                # this branch runs on every iteration except the last.
                _logger.debug("receive fwd (not last iteration)")
                input_tensor = recv_forward(
                    tensor_shapes=recv_tensor_shapes,
                    dtype=dtype,
                    async_comm=async_comm,
                    sequence_parallel_enabled=sequence_parallel_enabled,
                    sync_batch_comm=sync_batch_comm,
                )
        else:
            _logger.debug("send fwd & receive bwd")
            output_tensor_grad = send_forward_recv_backward(
                output_tensor,
                tensor_shapes=send_tensor_shapes,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
            # Add input_tensor and output_tensor to end of list.
            input_tensors.append(input_tensor)
            output_tensors.append(output_tensor)
            free_output_tensor(output_tensor, deallocate_pipeline_outputs)
            # Pop input_tensor and output_tensor from the start of the list for the backward pass.
            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            input_tensor_grad = backward_step(
                input_tensor,
                output_tensor,
                output_tensor_grad,
                model_type=model_type,
                grad_scaler=grad_scaler,
                deallocate_pipeline_outputs=deallocate_pipeline_outputs,
            )
            if last_iteration:
                input_tensor = None
                _logger.debug("send bwd")
                send_backward(
                    input_tensor_grad,
                    tensor_shapes=recv_tensor_shapes,
                    dtype=dtype,
                    async_comm=async_comm,
                    sequence_parallel_enabled=sequence_parallel_enabled,
                    sync_batch_comm=sync_batch_comm,
                )
            else:
                _logger.debug("send bwd and receive fwd")
                input_tensor = send_backward_recv_forward(
                    input_tensor_grad,
                    tensor_shapes=recv_tensor_shapes,
                    dtype=dtype,
                    async_comm=async_comm,
                    sequence_parallel_enabled=sequence_parallel_enabled,
                    sync_batch_comm=sync_batch_comm,
                )
    ###################################################################################################################
    # Run cooldown backward passes.
    ###################################################################################################################
    _logger.info("Cooldown phase")
    if not forward_only:
        for i in range(num_warmup_microbatches):
            _logger.debug(f"cooldown iter: {i} / {num_warmup_microbatches}")
            if i == num_warmup_microbatches-1 and rank == 0:
                # Async grad reduction in first pipeline stage, during
                # last backward pass
                enable_grad_sync()
            input_tensor = input_tensors.pop(0)
            output_tensor = output_tensors.pop(0)
            _logger.debug("receive bwd")
            output_tensor_grad = recv_backward(
                tensor_shapes=send_tensor_shapes,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
            input_tensor_grad = backward_step(
                input_tensor,
                output_tensor,
                output_tensor_grad,
                model_type=model_type,
                grad_scaler=grad_scaler,
                deallocate_pipeline_outputs=deallocate_pipeline_outputs,
            )
            _logger.debug("send bwd")
            send_backward(
                input_tensor_grad,
                tensor_shapes=recv_tensor_shapes,
                dtype=dtype,
                async_comm=async_comm,
                sequence_parallel_enabled=sequence_parallel_enabled,
                sync_batch_comm=sync_batch_comm,
            )
        # Grad reduction in all pipeline stages except the first, during
        # the bubble overhead
        enable_grad_sync()
        if rank != 0 and custom_grad_sync_func is not None:
            custom_grad_sync_func()
    return losses_reduced
|
apex-master
|
apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_without_interleaving.py
|
import contextlib
from typing import List, Union, Optional
import torch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.log_util import get_transformer_logger
# Fix: was misspelled `_all__`, which silently failed to declare the module's
# public API for `from module import *`.
__all__ = ["forward_backward_no_pipelining"]
# Module-level logger shared by the schedule below.
_logger = get_transformer_logger(__name__)
def forward_backward_no_pipelining(
    forward_step_func: FwdStepFunc,
    batch: Batch,
    model: Union[torch.nn.Module, List[torch.nn.Module]],
    *,
    forward_only: bool,
    dtype: Optional[torch.dtype] = None,
    grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
    disable_autocast: bool = False,
    custom_sync_context_handler=None,
    **kwargs,
):
    """Run forward and backward passes with no pipeline parallelism (no inter-stage communication).
    This pipeline parallel scheduling handles the last microbatch differently to synchronize gradients.
    Args:
        forward_step_func: A function which takes a minibatch and model as its arguments and
            returns model's forward output and the loss function.
            The loss function is supposed to take one `torch.Tensor` and
            return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
        batch: A List of torch.Tensors
        model: A `torch.nn.Module` or a list of `torch.nn.Module`.
    Keyword args:
        forward_only:
        grad_scaler:
        dtype:
        disable_autocast: Turn off `enabled` flag of `torch.cuda.amp.autocast` if :obj:`True`.
            Should be used when your forward and loss computation is in the autocast context to
            avoid unnecesarily nest autocast context.
        custom_sync_context_handler: Context manager to disable asynchronous gradient reductions.
        **kwargs: Added to handle `tensor_shape` which has no effect on this function.
    Returns:
        a list of dictionaries of loss `torch.Tensor`s if the last stage, empty list otherwise.
    """
    # This schedule supports exactly one model chunk (no virtual pipeline).
    model = listify_model(model)
    if len(model) != 1:
        msg = f"`model` is expected be a `nn.Module`, but {type(model)}"
        raise RuntimeError(msg)
    model = model[0]
    model_type = get_model_type(model)
    # Select the context under which the first (num_micro_batches - 1)
    # microbatches run: an explicit handler if given, DDP's `no_sync` to defer
    # gradient all-reduce when the model is DDP-wrapped, else a no-op.
    if custom_sync_context_handler is not None:
        context_handler = custom_sync_context_handler
    elif isinstance(model, torch.nn.parallel.distributed.DistributedDataParallel):
        context_handler = model.no_sync
    else:
        context_handler = contextlib.nullcontext
    losses_reduced = []
    # With no pipelining there is no neighboring stage: the forward input and
    # the seed gradient for backward stay None for every microbatch.
    input_tensor, output_tensor_grad = None, None
    num_micro_batches = get_num_microbatches()
    # All but the last microbatch run inside the (possibly no-sync) context so
    # gradient reduction can be deferred until the final backward pass.
    with context_handler():
        for i in range(num_micro_batches - 1):
            _logger.info(f"Iter {i} of {num_micro_batches - 1}")
            cur_micro_batch = get_kth_microbatch(batch, i)
            _logger.debug("Call `forward_step`")
            output_tensor = forward_step(
                forward_step_func,
                cur_micro_batch,
                model,
                input_tensor,
                losses_reduced,
                dtype=dtype,
                disable_autocast=disable_autocast,
            )
            if not forward_only:
                _logger.debug("Call `backward_step`")
                backward_step(
                    input_tensor,
                    output_tensor,
                    output_tensor_grad,
                    model_type=model_type,
                    grad_scaler=grad_scaler,
                )
    # Run computation for last microbatch out of context handler (want to
    # synchronize gradients).
    _logger.info("Cooldown")
    _logger.debug("Call `forward_step`")
    output_tensor = forward_step(
        forward_step_func,
        get_kth_microbatch(batch, num_micro_batches - 1),
        model,
        input_tensor,
        losses_reduced,
        dtype=dtype,
        disable_autocast=disable_autocast,
    )
    if not forward_only:
        _logger.debug("Call `backward_step`")
        backward_step(
            input_tensor,
            output_tensor,
            output_tensor_grad,
            model_type=model_type,
            grad_scaler=grad_scaler,
        )
    return losses_reduced
|
apex-master
|
apex/transformer/pipeline_parallel/schedules/fwd_bwd_no_pipelining.py
|
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import (
forward_backward_no_pipelining,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_with_interleaving import (
_forward_backward_pipelining_with_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
# Public API of this package: the factory that selects a schedule.
__all__ = [
    "get_forward_backward_func",
]
class ExperimentalWarning(Warning):
    """Warning category for experimental features that may change or be removed."""
    pass
def get_forward_backward_func(
    virtual_pipeline_model_parallel_size, pipeline_model_parallel_size,
):
    """Select the forward-backward schedule matching the pipeline configuration.

    Returns the no-pipelining schedule when there is a single pipeline stage,
    the interleaved schedule when a virtual pipeline size is given (validating
    microbatch divisibility), and the plain 1F1B schedule otherwise.
    """
    if parallel_state.get_pipeline_model_parallel_world_size() <= 1:
        return forward_backward_no_pipelining
    if virtual_pipeline_model_parallel_size is None:
        return forward_backward_pipelining_without_interleaving
    # Interleaved schedule: each stage cycles through model chunks, which
    # requires the microbatch count to divide evenly across the pipeline.
    if get_num_microbatches() % pipeline_model_parallel_size != 0:
        msg = "number of microbatches is not divisible by pipeline-parallel size when using interleaved schedule"
        raise RuntimeError(msg)
    return _forward_backward_pipelining_with_interleaving
|
apex-master
|
apex/transformer/pipeline_parallel/schedules/__init__.py
|
from typing import Any, Callable, Dict, List, Tuple, Union, Optional, Sequence
import torch
from torch.autograd.variable import Variable
from apex.normalization.fused_layer_norm import FusedLayerNorm
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel.p2p_communication import FutureTensor
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import unwrap_model
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.tensor_parallel.layers import (
set_defaults_if_not_set_tensor_model_parallel_attributes,
)
from apex.transformer.log_util import get_transformer_logger
# Module-level logger for this schedule implementation.
_logger = get_transformer_logger(__name__)
# A microbatch input: one (possibly future/async) tensor, or a list/tuple of them.
Batch = Union[torch.Tensor, FutureTensor, List[Union[torch.Tensor, FutureTensor]], Tuple[Union[torch.Tensor, FutureTensor], ...]]
# Maps a raw model output tensor to the loss tensor.
LossFunc = Callable[[torch.Tensor], torch.Tensor]
# Forward step: takes (microbatch, model) and returns (output, loss_func).
FwdStepFunc = Callable[
    [Optional[Batch], torch.nn.Module], Tuple[torch.Tensor, LossFunc]
]
def build_model(
    model_provider_func: Callable[[Any, Dict[str, Any]], torch.nn.Module],
    wrap_with_ddp: bool = True,
    virtual_pipeline_model_parallel_size: Optional[int] = None,
    model_type: ModelType = ModelType.encoder_or_decoder,
    *args: Any,
    **kwargs: Any,
) -> List[torch.nn.Module]:
    """Build the model satisfying pipeline model parallel requirements.

    This function sets `pre_process` and `post_process` into `**kwargs` and
    passes `*args` and `**kwargs` to `model_provider_func`.

    Args:
        model_provider_func: A function which takes `*args` and `**kwargs` and returns a `nn.Module`.
        wrap_with_ddp: If :obj:`True`, wrap the instantiated model
            with `torch.nn.parallel.distributed.DistributedDataParallel`, a.k.a. `DDP`.
        virtual_pipeline_model_parallel_size: Specify when using interleaving scheduling pipeline model parallel.
        model_type: Whether the model is encoder-only/decoder-only or encoder-and-decoder.
        *args: arguments for model provider func
        **kwargs: Keyword arguments for model provider func

    Returns:
        a list of `nn.Module`(s). If `virtual_pipeline_model_parallel_size` is not None,
        the list has multiple models, otherwise one.

    Raises:
        RuntimeError: For an encoder-and-decoder model with multiple pipeline
            stages when no pipeline split rank has been specified.
    """
    if (
        parallel_state.get_pipeline_model_parallel_world_size() > 1
        and virtual_pipeline_model_parallel_size is not None
    ):
        model = []
        for i in range(virtual_pipeline_model_parallel_size):
            cur_args = args
            # Copy so the per-chunk update below does not mutate the caller's
            # kwargs (the previous code updated the shared dict in place).
            cur_kwargs = kwargs.copy()
            parallel_state.set_virtual_pipeline_model_parallel_rank(i)
            # Set pre_process and post_process only after virtual rank is set.
            pre_process = parallel_state.is_pipeline_first_stage()
            post_process = parallel_state.is_pipeline_last_stage()
            cur_kwargs.update(
                {"pre_process": pre_process, "post_process": post_process}
            )
            this_model = model_provider_func(*cur_args, **cur_kwargs)
            model.append(this_model)
    else:
        cur_args = args
        # Copy so the update below does not mutate the caller's kwargs.
        cur_kwargs = kwargs.copy()
        if model_type == ModelType.encoder_or_decoder:
            pre_process = parallel_state.is_pipeline_first_stage()
            post_process = parallel_state.is_pipeline_last_stage()
            cur_kwargs.update(
                {"pre_process": pre_process, "post_process": post_process}
            )
            model = model_provider_func(*cur_args, **cur_kwargs)
        elif model_type == ModelType.encoder_and_decoder:
            pre_process = parallel_state.is_pipeline_first_stage()
            post_process = parallel_state.is_pipeline_last_stage()
            # `add_encoder` & `add_decoder` logic.
            add_encoder, add_decoder = True, True
            if parallel_state.get_pipeline_model_parallel_world_size() > 1:
                split_rank = parallel_state.get_pipeline_model_parallel_split_rank()
                if split_rank is None:
                    raise RuntimeError(
                        "Split rank needs to be specified for model with both encoder and decoder."
                    )
                rank = parallel_state.get_pipeline_model_parallel_rank()
                world_size = parallel_state.get_pipeline_model_parallel_world_size()
                # First overall stage and first decoder stage receive inputs;
                # last encoder stage and last overall stage emit outputs.
                pre_process = rank == 0 or rank == split_rank
                post_process = rank == (split_rank - 1) or rank == (world_size - 1)
                add_encoder = parallel_state.is_pipeline_stage_before_split()
                add_decoder = parallel_state.is_pipeline_stage_after_split()
            cur_kwargs.update(
                {
                    "pre_process": pre_process,
                    "post_process": post_process,
                    "add_encoder": add_encoder,
                    "add_decoder": add_decoder,
                }
            )
            model = model_provider_func(*cur_args, **cur_kwargs)
        model.model_type = model_type
    if not isinstance(model, list):
        model = [model]
    # Set tensor model parallel attributes if not set.
    # Only parameters that are already tensor model parallel have these
    # attributes set for them. We should make sure the default attributes
    # are set for all params so the optimizer can use them.
    for model_module in model:
        for param in model_module.parameters():
            set_defaults_if_not_set_tensor_model_parallel_attributes(param)
    # Print number of parameters (once per model-parallel group, via the
    # data-parallel rank-0 process).
    if (
        parallel_state.model_parallel_is_initialized()
        and parallel_state.get_data_parallel_rank() == 0
    ):
        msg = " > number of parameters on (tensor, pipeline) model parallel rank ({}, {}): {}".format(
            parallel_state.get_tensor_model_parallel_rank(),
            parallel_state.get_pipeline_model_parallel_rank(),
            _calc_number_of_params(model),
        )
        print(msg, flush=True)
    # GPU allocation.
    for model_module in model:
        model_module.cuda(torch.cuda.current_device())
    if wrap_with_ddp:
        i = torch.cuda.current_device()
        model = [
            torch.nn.parallel.distributed.DistributedDataParallel(
                model_module,
                device_ids=[i],
                output_device=i,
                process_group=parallel_state.get_data_parallel_group(),
            )
            for model_module in model
        ]
    return model
def _calc_number_of_params(model: List[torch.nn.Module]) -> int:
assert isinstance(model, list)
return sum(
[
sum([p.nelement() for p in model_module.parameters()])
for model_module in model
]
)
def _get_params_for_weight_decay_optimization(
    model: Union[torch.nn.Module, List[torch.nn.Module]],
    *,
    no_weight_decay_modules=(FusedLayerNorm,),
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Divide params into with-weight-decay and without-weight-decay groups.

    Layernorms and biases will have no weight decay but the rest will.

    Args:
        model: A module, or list of modules, whose parameters are partitioned.

    Keyword Args:
        no_weight_decay_modules: Module classes whose parameters are all exempt
            from weight decay (in addition to every parameter named "bias").

    Returns:
        A 2-tuple of optimizer param-group dicts:
        ``(weight_decay_params, no_weight_decay_params)``, where the second
        group carries ``"weight_decay": 0.0``.
        (The previous ``-> Dict[str, torch.nn.Parameter]`` annotation was
        incorrect — the function has always returned this tuple.)
    """
    modules = listify_model(model)
    weight_decay_params = {"params": []}
    no_weight_decay_params = {"params": [], "weight_decay": 0.0}
    for module in modules:
        for module_ in module.modules():
            # Only direct parameters (`_parameters`) of each submodule are
            # inspected, so every parameter is visited exactly once.
            if isinstance(module_, no_weight_decay_modules):
                no_weight_decay_params["params"].extend(
                    [p for p in list(module_._parameters.values()) if p is not None]
                )
            else:
                weight_decay_params["params"].extend(
                    [
                        p
                        for n, p in list(module_._parameters.items())
                        if p is not None and n != "bias"
                    ]
                )
                no_weight_decay_params["params"].extend(
                    [
                        p
                        for n, p in list(module_._parameters.items())
                        if p is not None and n == "bias"
                    ]
                )
    return weight_decay_params, no_weight_decay_params
def free_output_tensor(
    output_tensors: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]],
    deallocate_pipeline_outputs: bool = False,
) -> None:
    """Pseudo-free the output tensor's `.data` field.

    This method should be called right after the output tensor has been sent to
    the next pipeline stage. At this point, the output tensor is only useful
    for its `.grad_fn` field, and not its `.data`.
    """
    # No-op unless the experimental deallocation path is enabled and there is
    # something to free.
    if not deallocate_pipeline_outputs or output_tensors is None:
        return
    tensors = (
        [output_tensors]
        if isinstance(output_tensors, torch.Tensor)
        else output_tensors
    )
    for tensor in tensors:
        # Replace the storage with a 1-element placeholder; autograd graph
        # metadata (.grad_fn) is preserved.
        tensor.data = torch.cuda.FloatTensor([0])
def custom_backward(output: torch.Tensor, grad_output: Optional[torch.Tensor]) -> None:
    """Directly call C++ autograd engine.

    To make the `free_output_tensor` optimization work, the C++ autograd engine must be called
    directly, bypassing PyTorch's `torch.autograd.backward`. PyTorch's `backward` checks that the
    output and grad have the same shape, while C++ `backward` does not.

    Args:
        output: Scalar tensor to backpropagate from (pseudo-freed in schedule).
        grad_output: Gradient w.r.t. ``output``; if ``None``, an implicit
            gradient of ones is used (valid only for a scalar output).
    """
    # Type-check first so the numel assertion below can safely assume tensors
    # (the previous ordering called output.numel() before verifying the type).
    assert isinstance(output, torch.Tensor), "output == {}.".format(
        type(output).__name__
    )
    assert isinstance(
        grad_output, (torch.Tensor, type(None))
    ), "grad_output == {}.".format(type(grad_output).__name__)  # fixed "grad_outptu" typo
    assert (
        output.numel() == 1
    ), "output should be pseudo-freed in schedule, to optimize memory consumption"
    # Handle scalar output
    if grad_output is None:
        grad_output = torch.ones_like(output, memory_format=torch.preserve_format)
    # Call C++ engine [ see torch/csrc/autograd/python_engine.cpp ]
    Variable._execution_engine.run_backward(
        tensors=(output,),
        grad_tensors=(grad_output,),
        keep_graph=False,
        create_graph=False,
        inputs=(),
        allow_unreachable=True,
        accumulate_grad=True,
    )
def forward_step(
    forward_step_func: FwdStepFunc,
    batch: Optional[Batch],
    model: torch.nn.Module,
    input_tensor: Optional[Union[torch.Tensor, List[torch.Tensor]]],
    losses_reduced: List[torch.Tensor],
    dtype: torch.dtype,
    disable_autocast: bool = False,
    checkpoint_activations_micro_batch: Optional[bool] = None,
) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
    """Forward step for passed-in model.

    If first stage, input tensor is obtained from batch, otherwise passed-in
    input_tensor is used. On the last pipeline stage the loss function is
    applied and the reduced loss is appended to ``losses_reduced``.

    Args:
        forward_step_func: Model specific function. This takes a minibatch and model as its
            arguments and returns the model's output and the loss function.
        batch: minibatch
        model: unwrappable model
        input_tensor: activation(s) received from the previous pipeline stage.
        losses_reduced: accumulator of reduced losses (last stage only).
        dtype: autocast dtype used for mixed precision.
        disable_autocast: if True, run without autocast even for half dtypes.
        checkpoint_activations_micro_batch: forwarded to `forward_step_func`
            when not None (partial activation checkpointing).

    Returns:
        output_tensor
    """
    unwrapped_model = unwrap_model(model)
    model_type = get_model_type(unwrapped_model)
    # NOTE (mkozuki): The passed `model` is expected to implement `set_input_tensor`.
    # See https://github.com/NVIDIA/Megatron-LM/blob/5ac5571ba0265af4c491ee0af1508ca7589450c6/megatron/model/transformer.py#L679 # NOQA
    # for the details of `set_input_tensor`.
    unwrap_output_tensor = not isinstance(input_tensor, list)
    if unwrap_output_tensor:
        input_tensor = [input_tensor]
    # Resolve any asynchronous p2p transfers before use.
    input_tensor = [
        tensor.get() if isinstance(tensor, FutureTensor) else tensor
        for tensor in input_tensor
    ]
    unwrapped_model.set_input_tensor(input_tensor)
    use_autocast = not disable_autocast and dtype in (torch.half, torch.bfloat16)
    with torch.cuda.amp.autocast(enabled=use_autocast, dtype=dtype):
        if checkpoint_activations_micro_batch is None:
            output_tensor, loss_func = forward_step_func(batch, model)
        else:
            output_tensor, loss_func = forward_step_func(
                batch, model, checkpoint_activations_micro_batch
            )
        if parallel_state.is_pipeline_last_stage():
            loss, loss_reduced = loss_func(output_tensor)
            # Average the loss over the microbatches of the global batch.
            output_tensor = loss / get_num_microbatches()
            losses_reduced.append(loss_reduced)
    # If T5 model (or other model with encoder and decoder)
    # and in decoder stack, then send encoder_hidden_state
    # downstream as well.
    if (
        parallel_state.is_pipeline_stage_after_split()
        and model_type == ModelType.encoder_and_decoder
    ):
        return [output_tensor, input_tensor[-1]]
    return output_tensor if unwrap_output_tensor else [output_tensor]
def backward_step(
    input_tensor: Optional[torch.Tensor],
    output_tensor: torch.Tensor,
    output_tensor_grad: Optional[torch.Tensor],
    model_type: ModelType,
    *,
    grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
    deallocate_pipeline_outputs: bool = False,
) -> Union[None, torch.Tensor, Sequence[torch.Tensor]]:
    """Backward step through passed-in output tensor.

    If last stage, output_tensor_grad is None, otherwise gradient of loss
    with respect to stage's output tensor.

    Returns gradient of loss with respect to input tensor (None if first
    stage).

    Args:
        input_tensor: Input of this stage's forward pass; may also be a list
            (e.g. activation plus encoder hidden state for encoder-decoder).
        output_tensor: Output of this stage's forward pass (the loss on the
            last stage); may also be a list.
        output_tensor_grad: Gradient arriving from the next stage, or None on
            the last stage.

    Keyword Arguments:
        grad_scaler: When given and this is the last stage (no incoming grad),
            the loss is scaled before backprop for mixed-precision training.
        deallocate_pipeline_outputs: Experimental. Use `custom_backward` so
            pseudo-freed output tensors can still be backpropagated through.

    Returns:
        input_tensor_grad
    """
    # timers = get_timers()
    # timers("backward-compute").start()
    # Retain the grad on the input_tensor.
    unwrap_input_tensor_grad = not isinstance(input_tensor, list)
    if unwrap_input_tensor_grad:
        input_tensor = [input_tensor]
    # Resolve asynchronous p2p transfers before use.
    input_tensor = [inp.get() if isinstance(inp, FutureTensor) else inp for inp in input_tensor]
    for x in input_tensor:
        if x is not None:
            # Non-leaf tensors drop .grad by default; retain it so the gradient
            # can be read back below and sent to the previous stage.
            x.retain_grad()
    if not isinstance(output_tensor, list):
        output_tensor = [output_tensor]
    output_tensor = [out.get() if isinstance(out, FutureTensor) else out for out in output_tensor]
    if not isinstance(output_tensor_grad, list):
        output_tensor_grad = [output_tensor_grad]
    output_tensor_grad = [ogr.get() if isinstance(ogr, FutureTensor) else ogr for ogr in output_tensor_grad]
    # Backward pass.
    # A None incoming grad marks the last stage: scale the loss first when a
    # grad scaler is in use.
    if grad_scaler is not None and output_tensor_grad[0] is None:
        output_tensor[0] = grad_scaler.scale(output_tensor[0])
    if deallocate_pipeline_outputs:
        custom_backward(output_tensor[0], output_tensor_grad[0])
    else:
        torch.autograd.backward(output_tensor[0], grad_tensors=output_tensor_grad[0])
    # Collect the grad of the input_tensor.
    input_tensor_grad = [None]
    if input_tensor is not None:
        input_tensor_grad = []
        for x in input_tensor:
            input_tensor_grad.append(None if x is None else x.grad)
    # Handle single skip connection if it exists (encoder_hidden_state in model with encoder and decoder).
    if (
        parallel_state.get_pipeline_model_parallel_world_size() > 1
        and parallel_state.is_pipeline_stage_after_split()
        and model_type == ModelType.encoder_and_decoder
    ):
        if output_tensor_grad[1] is not None:
            # todo (mkozuki): Replace the inplace add with `+= output_tensor_grad[1]`?
            input_tensor_grad[-1].add_(output_tensor_grad[1])
    # timers("backward-compute").stop()
    return input_tensor_grad[0] if unwrap_input_tensor_grad else input_tensor_grad
|
apex-master
|
apex/transformer/pipeline_parallel/schedules/common.py
|
import contextlib
from typing import Any, Callable, List, Optional, Sequence, Union
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import free_output_tensor
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.log_util import get_transformer_logger
__all__ = ["_forward_backward_pipelining_with_interleaving"]
_logger = get_transformer_logger(__name__)
# TODO(mkozuki): Reduce cyclomatic complexity
def _forward_backward_pipelining_with_interleaving(
forward_step_func: FwdStepFunc,
batch: List[Optional[Batch]],
model: List[torch.nn.Module],
*,
forward_only: bool,
tensor_shape: Optional[Union[List[int], torch.Size]] = None,
dtype: Optional[torch.dtype] = None,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
disable_autocast: bool = False,
deallocate_pipeline_outputs: bool = False,
async_comm: bool = False,
sequence_parallel_enabled: bool = False,
custom_sync_context_handler: Optional[Callable] = None,
custom_grad_sync_func: Optional[Callable] = None,
custom_param_sync_func: Optional[Callable] = None,
sync_batch_comm: bool = True,
num_micro_batches_with_partial_activation_checkpoints: Optional[int] = None,
overlap_p2p_comm: bool = False,
batch_p2p_comm: bool = True,
**kwargs,
) -> List[Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Run interleaved 1F1B schedule with communication between pipeline stages as needed.
This function assumes `batch` and `model` is a list of `Batch`'s and a list of `torch.nn.Module`, respectively.
This means that model is split into model chunks.
This pipeline parallel scheduling consists of three steps:
1. warmup
2. 1F1B a.k.a. steady state
3. cooldown
Note that if `forward_only` this scheduling consists of only warmup phase.
Args:
forward_step_func: A function which takes a minibatch and model as its arguments and
returns model's forward output and the loss function.
The loss function is supposed to take one `torch.Tensor` and
return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
batch: A minibatch, i.e., a list of `torch.Tensor`'s.
model: A `torch.nn.Module` or a list of `torch.nn.Module`.
Keyword args:
forward_only:
tensor_shape: Shape of tensor. The tensor is expected to be 3D and its order of dimension
is supposed to be ``(sequence, batch, hidden)``.
dtype: dtype used in p2p communication. If ``None`` (default value),
torch.float32 will be used even if ``autocast`` is enabled.
grad_scaler:
disable_autocast:
deallocate_pipeline_outputs: If :obj:`True`, free the data of the output tensor of
each pipeline stage. Experimental.
sequence_parallel_enabled: Set to :obj:`True` for this function to handle sequence length.
When :obj:`True`, the sequence length on each tensor model parallel rank is updated
to :math:`original\_sequence\_length / tensor\_model\_parallel\_world\_size`.
custom_sync_context_handler: If provided, this is treated as a
function to construct a context manager to disable
asynchronous gradient reductions. Asynchronous gradient
reductions are only enabled in the final backward pass of
each model chunk.
custom_grad_sync_func: If provided, this is treated as a
function to launch asynchronous gradient reductions (e.g.
reduce-scatters with distributed optimizer). The function
should take one positional argument: a list of parameters
whose gradients should be synchronized. Asynchronous
gradient reductions are launched after the final backward
pass of each model chunk.
custom_param_sync_func: If provided, this is treated as a
function to launch asynchronous parameter synchronizations
(e.g. all-gathers with distributed optimizer). The
function should take one positional argument: a list of
parameters whose values should be synchronized.
Asynchronous parameter synchronizations are launched
before the first forward pass of each model chunk.
sync_batch_comm: If :obj:`False`, disable cuda synchronization after the batched communication.
To disable, https://github.com/pytorch/pytorch/pull/82450 would be required.
num_micro_batches_with_partial_activation_checkpoints: If :obj:`int`, set the number of
micro-batches checkpointing the activation of partial number of Transformer layers.
The rest of the micro-batch within the window of maximum outstanding micro-batch
backpropagations would checkpoint all Transformer layers.
overlap_p2p_comm: If :obj:`True`, returns cuda wait handles to scheduler instead of completing
the communication within the p2p transfer API instance. The scheduler manages the communication completion
to overlap with computation.
batch_p2p_comm: If :obj:`True`, use the batched send and receive api to conduct the communication of
a collection of send and receive operations between peer. If :obj:`False`, conduct each send and recv operation
individually.
Returns:
a list of loss `torch.Tensor`s if the last stage, empty list otherwise.
"""
if not isinstance(model, list):
raise RuntimeError("`model` must be a list of `nn.Module`'s'")
if deallocate_pipeline_outputs:
warnings.warn(
"`deallocate_pipeline_outputs` is experimental and subject to change. "
"This option is not recommended."
)
# Construct helper functions for async grad reductions
if custom_sync_context_handler is not None:
sync_context_handler = custom_sync_context_handler
else:
sync_context_handler = contextlib.nullcontext
sync_context = None
def disable_grad_sync():
"""Disable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is None:
sync_context = sync_context_handler()
sync_context.__enter__()
def enable_grad_sync():
"""Enable asynchronous grad reductions"""
nonlocal sync_context
if sync_context is not None:
sync_context.__exit__(None, None, None)
sync_context = None
disable_grad_sync()
# mypy will blame the following if statement
if sequence_parallel_enabled:
seq_length, batch_size, hidden = tensor_shape
tensor_shape = (
seq_length // parallel_state.get_tensor_model_parallel_world_size(),
batch_size,
hidden,
)
num_model_chunks: int = len(model)
input_tensors: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
output_tensors: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
curr_iters: List[int] = [0 for _ in range(num_model_chunks)]
losses_reduced: List[Union[None, torch.Tensor]] = []
if not forward_only:
output_tensor_grads: List[List[Union[None, torch.Tensor]]] = [
[] for _ in range(num_model_chunks)
]
pipeline_parallel_size: int = parallel_state.get_pipeline_model_parallel_world_size()
pipeline_parallel_rank: int = parallel_state.get_pipeline_model_parallel_rank()
# Compute number of warmup and remaining microbatches.
num_microbatches: int = get_num_microbatches() * num_model_chunks
all_warmup_microbatches: bool = False
if forward_only:
num_warmup_microbatches: int = num_microbatches
else:
# Run all forward passes and then all backward passes if number of
# microbatches is just the number of pipeline stages.
# Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on
# all workers, followed by more microbatches after depending on
# stage ID (more forward passes for earlier stages, later stages can
# immediately start with 1F1B).
if get_num_microbatches() == pipeline_parallel_size:
num_warmup_microbatches = num_microbatches
all_warmup_microbatches = True
else:
num_warmup_microbatches = (
pipeline_parallel_size - pipeline_parallel_rank - 1
) * 2
num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size
num_warmup_microbatches = min(num_warmup_microbatches, num_microbatches)
num_microbatches_remaining: int = num_microbatches - num_warmup_microbatches
# Checkpoint the activations of partial Transformer layers in a number of micro-batches
# within the maximum outstanding micro-batch backpropagations.
# Micro-batches with the ids less than 'num_micro_batches_with_partial_activation_checkpoints'
# checkpoint partial Transformer layers (or skip checkpointing) and
# the rest of micro-batches within a window of micro-batches checkpoint
# all Transformer layers. The window of micro-batches is set by the maximum
# outstanding backpropagations and becomes smaller at later pipeline stages.
# Please refer the appendix C in https://arxiv.org/pdf/2205.05198.pdf
max_outstanding_backprops = None
if num_micro_batches_with_partial_activation_checkpoints is not None:
max_outstanding_backprops = num_warmup_microbatches + 1
_logger.info(
f"num_microbatches: {num_microbatches}, "
f"num_warmup_microbatches: {num_warmup_microbatches}, "
f"num_microbatches_remaining: {num_microbatches_remaining}"
)
# Synchronize params for first two model chunks
if custom_param_sync_func is not None:
custom_param_sync_func(model[0].parameters())
custom_param_sync_func(model[1].parameters())
###################################################################################################################
# Helper function definitions.
###################################################################################################################
def get_model_chunk_id(microbatch_id: int, forward: bool) -> int:
"""Helper function to get the model chunk ID given the iteration number.
Each model chunk processes pipeline_parallel_size microbatches
at a time. We assume that the number of microbatches is a
multiple of pipeline_parallel_size*num_model_chunks.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
microbatch_id_in_group = microbatch_id % microbatch_group_size
model_chunk_id = microbatch_id_in_group // pipeline_parallel_size
if not forward:
model_chunk_id = num_model_chunks - model_chunk_id - 1
return model_chunk_id
def is_first_microbatch_for_model_chunk(microbatch_id: int) -> bool:
"""Helper function to check if an iteration is the first for a model
chunk.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
num_microbatch_groups = num_microbatches // microbatch_group_size
microbatch_group_id = microbatch_id // microbatch_group_size
microbatch_id_in_group = microbatch_id % microbatch_group_size
if microbatch_group_id == 0:
return microbatch_id_in_group % pipeline_parallel_size == 0
else:
return False
def is_last_microbatch_for_model_chunk(microbatch_id: int) -> bool:
"""Helper function to check if an iteration is the last for a model
chunk.
"""
microbatch_group_size = pipeline_parallel_size * num_model_chunks
num_microbatch_groups = num_microbatches // microbatch_group_size
microbatch_group_id = microbatch_id // microbatch_group_size
microbatch_id_in_group = microbatch_id % microbatch_group_size
if microbatch_group_id == num_microbatch_groups - 1:
return microbatch_id_in_group % pipeline_parallel_size == pipeline_parallel_size - 1
else:
return False
def forward_step_helper(
microbatch_id: int,
curr_iters: List[int],
checkpoint_activations_micro_batch: Optional[bool] = None,
) -> torch.Tensor:
"""Helper method to run forward step with model split into chunks
(run set_virtual_pipeline_model_parallel_rank() before calling forward_step()).
"""
model_chunk_id = get_model_chunk_id(microbatch_id, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(model_chunk_id)
# launch param synchronization for next model chunk
# Note: To achieve maximum performance, pipeline parallelism
# assumes all ranks have the same compute time. However,
# asynchronous communication tends to slow down compute. Thus,
# we launch asynchronous communication at the same time across
# the pipeline-parallel group.
if custom_param_sync_func is not None:
param_sync_microbatch_id = microbatch_id + pipeline_parallel_rank
if param_sync_microbatch_id < num_microbatches and is_first_microbatch_for_model_chunk(param_sync_microbatch_id):
param_sync_chunk_id = get_model_chunk_id(param_sync_microbatch_id, forward=True) + 1
if 1 < param_sync_chunk_id < num_model_chunks:
custom_param_sync_func(model[param_sync_chunk_id].parameters())
# forward step
if parallel_state.is_pipeline_first_stage() and len(
input_tensors[model_chunk_id]
) == len(output_tensors[model_chunk_id]):
input_tensors[model_chunk_id].append(None)
input_tensor = input_tensors[model_chunk_id][-1]
output_tensor = forward_step(
forward_step_func,
get_kth_microbatch(batch, curr_iters[model_chunk_id]),
model[model_chunk_id],
input_tensor,
losses_reduced,
dtype,
disable_autocast,
checkpoint_activations_micro_batch,
)
curr_iters[model_chunk_id] += 1
output_tensors[model_chunk_id].append(output_tensor)
# if forward-only, no need to save tensors for a backward pass
if forward_only:
input_tensors[model_chunk_id].pop()
output_tensors[model_chunk_id].pop()
return output_tensor
def backward_step_helper(microbatch_id: int) -> torch.Tensor:
    """Helper method to run backward step with model split into chunks
    (run set_virtual_pipeline_model_parallel_rank() before calling backward_step()).

    NOTE: this is a closure over the enclosing schedule's state
    (model, input_tensors, output_tensors, output_tensor_grads,
    grad_scaler, enable_grad_sync/disable_grad_sync, ...).

    Args:
        microbatch_id: global index of the microbatch across all chunks.

    Returns:
        The gradient w.r.t. this chunk's input tensor, to be sent upstream.
    """
    model_chunk_id = get_model_chunk_id(microbatch_id, forward=False)
    model_type = get_model_type(model[model_chunk_id])
    parallel_state.set_virtual_pipeline_model_parallel_rank(model_chunk_id)
    # launch grad synchronization (default)
    # With the built-in sync path, enable grad sync just before the last
    # microbatch of this chunk so gradients are reduced exactly once.
    if custom_grad_sync_func is None and is_last_microbatch_for_model_chunk(microbatch_id):
        enable_grad_sync()
    # backward step
    # The last (virtual) stage has no downstream gradient; seed with None.
    if parallel_state.is_pipeline_last_stage():
        if len(output_tensor_grads[model_chunk_id]) == 0:
            output_tensor_grads[model_chunk_id].append(None)
    # Consume the oldest saved activation triplet (FIFO order matches the
    # order forward_step_helper appended them).
    input_tensor = input_tensors[model_chunk_id].pop(0)
    output_tensor = output_tensors[model_chunk_id].pop(0)
    output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0)
    input_tensor_grad = backward_step(
        input_tensor,
        output_tensor,
        output_tensor_grad,
        model_type=model_type,
        grad_scaler=grad_scaler,
        deallocate_pipeline_outputs=deallocate_pipeline_outputs,
    )
    # launch grad synchronization (custom grad sync)
    # Note: To achieve maximum performance, pipeline parallelism
    # assumes all ranks have the same compute time. However,
    # asynchronous communication tends to slow down compute. Thus,
    # we launch asynchronous communication at the same time across
    # the pipeline-parallel group.
    if custom_grad_sync_func is not None:
        # Offset by the pipeline rank so all ranks launch the custom sync for
        # a given chunk during the same scheduling step.
        grad_sync_microbatch_id = microbatch_id - pipeline_parallel_rank
        if grad_sync_microbatch_id >= 0 and is_last_microbatch_for_model_chunk(grad_sync_microbatch_id):
            grad_sync_chunk_id = get_model_chunk_id(grad_sync_microbatch_id, forward=False)
            enable_grad_sync()
            custom_grad_sync_func(model[grad_sync_chunk_id].parameters())
            disable_grad_sync()
    return input_tensor_grad
###################################################################################################################
# Run warmup forward passes.
###################################################################################################################
fwd_wait_handles, bwd_wait_handles = None, None
parallel_state.set_virtual_pipeline_model_parallel_rank(0)
input_tensors[0].append(
p2p_communication.recv_forward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
_logger.info("Warmup phase")
for k in range(num_warmup_microbatches):
_logger.debug(f"warmup iter: {k} / {num_warmup_microbatches}")
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = k % max_outstanding_backprops >= \
num_micro_batches_with_partial_activation_checkpoints
else:
checkpoint_activations_micro_batch = None
if fwd_wait_handles is not None:
for wait_handle in fwd_wait_handles:
wait_handle.wait()
output_tensor = forward_step_helper(k, curr_iters, checkpoint_activations_micro_batch)
# Determine if tensor should be received from previous stage.
next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True)
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
if next_forward_model_chunk_id == 0:
recv_prev = False
if k == (num_microbatches - 1):
recv_prev = False
_logger.debug(
f"next fwd model chunk ID: {next_forward_model_chunk_id}, recv_prev: {recv_prev}"
)
# Don't send tensor downstream if on last stage.
if parallel_state.is_pipeline_last_stage():
_logger.debug("Pipeline last stage, not sending tensor downstream")
output_tensor = None
if overlap_p2p_comm:
# P2P communications in warmup are not overlapped with computes. We split P2P
# communications for activation forward and activation_gradient backward in warmup,
# to match the send/recv API granularity in 1F1B in case of using batched send/recv API.
# Send and receive tensors as appropriate (send tensors computed
# in this iteration; receive tensors for next iteration).
_logger.debug("send fwd and receive fwd")
input_tensor, fwd_wait_handles = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
if (
k == (num_warmup_microbatches - 1)
and not forward_only
and not all_warmup_microbatches
):
input_tensor_grad = None
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
recv_next = False
_logger.debug("send bwd and receive bwd")
output_tensor_grad, bwd_wait_handles = p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
input_tensors[next_forward_model_chunk_id].append(input_tensor)
else:
# Send and receive tensors as appropriate (send tensors computed
# in this iteration; receive tensors for next iteration).
if (
k == (num_warmup_microbatches - 1)
and not forward_only
and not all_warmup_microbatches
):
input_tensor_grad = None
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
recv_next = False
_logger.debug("send fwd&bwd and receive fwd&bwd")
(
input_tensor,
output_tensor_grad,
) = p2p_communication.send_forward_backward_recv_forward_backward(
output_tensor,
input_tensor_grad,
recv_prev=recv_prev,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad)
else:
_logger.debug("send fwd and receive fwd")
input_tensor = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
input_tensors[next_forward_model_chunk_id].append(input_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
###################################################################################################################
# Run 1F1B in steady state.
###################################################################################################################
_logger.info("Steady phase")
for k in range(num_microbatches_remaining):
# Forward pass.
_logger.debug(f" steady phase iter {k} / {num_microbatches_remaining}")
forward_k = k + num_warmup_microbatches
# Decide to checkpoint all layers' activations of the current micro-batch
if max_outstanding_backprops is not None:
checkpoint_activations_micro_batch = (
forward_k % max_outstanding_backprops >= num_micro_batches_with_partial_activation_checkpoints
)
else:
checkpoint_activations_micro_batch = None
if overlap_p2p_comm:
if fwd_wait_handles is not None:
for wait_handle in fwd_wait_handles:
wait_handle.wait()
output_tensor = forward_step_helper(forward_k, curr_iters, checkpoint_activations_micro_batch)
# Set forward model chunk id
forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
# Last virtual stage no activation tensor to send
if parallel_state.is_pipeline_last_stage():
output_tensor = None
# Determine if the current virtual stage has an activation tensor to receive
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
# First stage is ahead of last stage by (pipeline_parallel_size - 1).
next_forward_model_chunk_id = get_model_chunk_id(
forward_k - (pipeline_parallel_size - 1), forward=True
)
if next_forward_model_chunk_id == (num_model_chunks - 1):
recv_prev = False
next_forward_model_chunk_id += 1
else:
next_forward_model_chunk_id = get_model_chunk_id(
forward_k + 1, forward=True
)
# If last iteration, don't receive; we already received one extra
# before the start of the for loop.
if k == (num_microbatches_remaining - 1):
recv_prev = False
# Send activation tensor to the next stage and receive activation tensor from the
# previous stage
_logger.debug("send fwd and receive fwd")
input_tensor, fwd_wait_handles = p2p_communication.send_forward_recv_forward(
output_tensor,
recv_prev=recv_prev,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
if bwd_wait_handles is not None:
for wait_handle in bwd_wait_handles:
wait_handle.wait()
# Backward pass.
backward_k = k
input_tensor_grad = backward_step_helper(backward_k)
# Set backward model chunk id
backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
parallel_state.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
_logger.debug(
f"fwd/bwd model chunk id: {forward_model_chunk_id}/{backward_model_chunk_id}"
)
# First virtual stage no activation gradient tensor to send
if parallel_state.is_pipeline_first_stage():
input_tensor_grad = None
# Determine if the current virtual stage has an activation gradient tensor to receive
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
# Last stage is ahead of first stage by (pipeline_parallel_size - 1).
next_backward_model_chunk_id = get_model_chunk_id(
backward_k - (pipeline_parallel_size - 1), forward=False
)
if next_backward_model_chunk_id == 0:
recv_next = False
next_backward_model_chunk_id -= 1
else:
next_backward_model_chunk_id = get_model_chunk_id(
backward_k + 1, forward=False
)
# Send activation grad tensor to the previous stage and receive activation grad tensor
# from the previous stage
_logger.debug("send bwd and receive bwd")
output_tensor_grad, bwd_wait_handles = p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
overlap_p2p_comm=True,
batch_p2p_comm=batch_p2p_comm,
)
else:
output_tensor = forward_step_helper(forward_k, curr_iters, checkpoint_activations_micro_batch)
# Backward pass.
backward_k = k
input_tensor_grad = backward_step_helper(backward_k)
# Send output_tensor and input_tensor_grad, receive input_tensor
# and output_tensor_grad.
# Determine if current stage has anything to send in either direction,
# otherwise set tensor to None.
forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True)
parallel_state.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id)
if parallel_state.is_pipeline_last_stage():
output_tensor = None
backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False)
parallel_state.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id)
_logger.debug(
f"fwd/bwd model chunk id: {forward_model_chunk_id}/{backward_model_chunk_id}"
)
if parallel_state.is_pipeline_first_stage():
input_tensor_grad = None
# Determine if peers are sending, and where in data structure to put
# received tensors.
recv_prev = True
if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
# First stage is ahead of last stage by (pipeline_parallel_size - 1).
next_forward_model_chunk_id = get_model_chunk_id(
forward_k - (pipeline_parallel_size - 1), forward=True
)
if next_forward_model_chunk_id == (num_model_chunks - 1):
recv_prev = False
next_forward_model_chunk_id += 1
else:
next_forward_model_chunk_id = get_model_chunk_id(
forward_k + 1, forward=True
)
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
# Last stage is ahead of first stage by (pipeline_parallel_size - 1).
next_backward_model_chunk_id = get_model_chunk_id(
backward_k - (pipeline_parallel_size - 1), forward=False
)
if next_backward_model_chunk_id == 0:
recv_next = False
next_backward_model_chunk_id -= 1
else:
next_backward_model_chunk_id = get_model_chunk_id(
backward_k + 1, forward=False
)
# If last iteration, don't receive; we already received one extra
# before the start of the for loop.
if k == (num_microbatches_remaining - 1):
recv_prev = False
# Communicate tensors.
_logger.debug("send fwd&bwd and receive fwd&bwd")
(
input_tensor,
output_tensor_grad,
) = p2p_communication.send_forward_backward_recv_forward_backward(
output_tensor,
input_tensor_grad,
recv_prev=recv_prev,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Put input_tensor and output_tensor_grad in data structures in the
# right location.
if recv_prev:
input_tensors[next_forward_model_chunk_id].append(input_tensor)
if recv_next:
output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad)
###################################################################################################################
# Run cooldown backward passes (flush out pipeline).
###################################################################################################################
_logger.info("Cooldown phase")
if not forward_only:
if overlap_p2p_comm and bwd_wait_handles is not None:
for wait_handle in bwd_wait_handles:
wait_handle.wait()
if all_warmup_microbatches:
output_tensor_grads[num_model_chunks - 1].append(
p2p_communication.recv_backward(
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
for k in range(num_microbatches_remaining, num_microbatches):
_logger.debug(
f"cooldown iter {k} in range({num_microbatches_remaining}, {num_microbatches})"
)
input_tensor_grad = backward_step_helper(k)
next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False)
recv_next = True
if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
if next_backward_model_chunk_id == (num_model_chunks - 1):
recv_next = False
if k == (num_microbatches - 1):
recv_next = False
output_tensor_grads[next_backward_model_chunk_id].append(
p2p_communication.send_backward_recv_backward(
input_tensor_grad,
recv_next=recv_next,
tensor_shape=tensor_shape,
dtype=dtype,
async_comm=async_comm,
sequence_parallel_enabled=sequence_parallel_enabled,
sync_batch_comm=sync_batch_comm,
batch_p2p_comm=batch_p2p_comm,
)
)
# Make sure to exit context handler for async grad reductions
enable_grad_sync()
return losses_reduced
|
apex-master
|
apex/transformer/pipeline_parallel/schedules/fwd_bwd_pipelining_with_interleaving.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import os
import torch
def parse_args(extra_args_provider=None, defaults=None, override_args=None,
               ignore_unknown_args=False):
    """Parse all Megatron-LM command line arguments.

    Args:
        extra_args_provider: optional callable ``parser -> parser`` that adds
            application-specific arguments before parsing.
        defaults: optional mapping of argument name -> default value. Each
            entry is applied only when the user did not supply the argument
            on the command line (i.e. the parsed value is still None).
        override_args: optional mapping of argument name -> value forced onto
            the parsed namespace, unconditionally overriding the command line.
        ignore_unknown_args: if True, unrecognized arguments are ignored
            (parse_known_args) instead of raising a parser error.

    Returns:
        The validated ``argparse.Namespace`` with all derived fields set.
    """
    # Use None sentinels instead of mutable default arguments ({}), which
    # would be a single dict shared across all calls.
    if defaults is None:
        defaults = {}
    if override_args is None:
        override_args = {}
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
                                     allow_abbrev=False)

    # Standard arguments.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)
    parser = _add_biencoder_args(parser)
    parser = _add_vision_args(parser)
    parser = _add_logging_args(parser)
    # NOTE(mkozuki): This option is added to investigate the potential of `torch.autograd.graph.save_on_cpu()`.
    # ref: https://pytorch.org/docs/stable/autograd.html#torch.autograd.graph.save_on_cpu.
    parser.add_argument('--cpu-offload', action='store_true', default=False,
                        help='Turns on CPU offloading')

    # Custom arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)

    # Parse.
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()

    # Distributed args.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    for key in override_args:
        setattr(args, key, override_args[key])

    # Tensor model parallel size.
    args.tensor_model_parallel_size = min(
        args.tensor_model_parallel_size, args.world_size)
    assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
        ' ({}) is not divisible by tensor model parallel size ({})'.format(
            args.world_size, args.tensor_model_parallel_size)

    # Pipeline model parallel size.
    args.pipeline_model_parallel_size = min(
        args.pipeline_model_parallel_size,
        (args.world_size // args.tensor_model_parallel_size))
    args.transformer_pipeline_model_parallel_size = (
        args.pipeline_model_parallel_size - 1
        if args.standalone_embedding_stage else
        args.pipeline_model_parallel_size
    )

    # Checks.
    model_parallel_size = args.pipeline_model_parallel_size * \
        args.tensor_model_parallel_size
    # Bug fix: the original message had only two placeholders for the three
    # format arguments, so the printed sizes were mislabeled.
    assert args.world_size % model_parallel_size == 0, 'world size ({}) is '\
        'not divisible by tensor parallel size ({}) times pipeline parallel '\
        'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
                           args.pipeline_model_parallel_size)
    args.data_parallel_size = args.world_size // model_parallel_size
    if args.rank == 0:
        print('using world size: {}, data-parallel-size: {}, '
              'tensor-model-parallel size: {}, '
              'pipeline-model-parallel size: {} '.format(
                  args.world_size, args.data_parallel_size,
                  args.tensor_model_parallel_size,
                  args.pipeline_model_parallel_size), flush=True)

    if args.pipeline_model_parallel_size > 1:
        if args.pipeline_model_parallel_split_rank is not None:
            assert args.pipeline_model_parallel_split_rank < \
                args.pipeline_model_parallel_size, 'split rank needs'\
                ' to be less than pipeline model parallel size ({})'.format(
                    args.pipeline_model_parallel_size)

    # Deprecated arguments
    assert args.batch_size is None, '--batch-size argument is no longer ' \
        'valid, use --micro-batch-size instead'
    del args.batch_size
    assert args.warmup is None, '--warmup argument is no longer valid, use ' \
        '--lr-warmup-fraction instead'
    del args.warmup
    assert args.model_parallel_size is None, '--model-parallel-size is no ' \
        'longer valid, use --tensor-model-parallel-size instead'
    del args.model_parallel_size
    if args.checkpoint_activations:
        args.recompute_granularity = 'full'
        args.recompute_method = 'uniform'
        if args.rank == 0:
            print('--checkpoint-activations is no longer valid, '
                  'use --recompute-granularity and --recompute-method instead. '
                  'Defaulting to recompute-granularity=full and recompute-method=uniform.')
    del args.checkpoint_activations
    if args.recompute_activations:
        args.recompute_granularity = 'selective'
    del args.recompute_activations

    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        if getattr(args, key) is not None:
            if args.rank == 0:
                # Bug fix: the original used a backslash continuation inside
                # the string literal, which embedded the next line's
                # indentation in the printed message.
                print('WARNING: overriding default arguments for {key}:{v} '
                      'with {key}:{v2}'.format(key=key, v=defaults[key],
                                               v2=getattr(args, key)),
                      flush=True)
        else:
            setattr(args, key, defaults[key])

    # Batch size.
    assert args.micro_batch_size is not None
    assert args.micro_batch_size > 0
    if args.global_batch_size is None:
        args.global_batch_size = args.micro_batch_size * args.data_parallel_size
        if args.rank == 0:
            print('setting global batch size to {}'.format(
                args.global_batch_size), flush=True)
    assert args.global_batch_size > 0
    if args.num_layers_per_virtual_pipeline_stage is not None:
        assert args.pipeline_model_parallel_size > 2, \
            'pipeline-model-parallel size should be greater than 2 with ' \
            'interleaved schedule'
        assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
            'number of layers is not divisible by number of layers per virtual ' \
            'pipeline stage'
        args.virtual_pipeline_model_parallel_size = \
            (args.num_layers // args.pipeline_model_parallel_size) // \
            args.num_layers_per_virtual_pipeline_stage
    else:
        args.virtual_pipeline_model_parallel_size = None

    # Parameters dtype.
    args.params_dtype = torch.float
    if args.fp16:
        assert not args.bf16
        args.params_dtype = torch.half
    if args.bf16:
        assert not args.fp16
        args.params_dtype = torch.bfloat16
        # bfloat16 requires gradient accumulation and all-reduce to
        # be done in fp32.
        if not args.accumulate_allreduce_grads_in_fp32:
            args.accumulate_allreduce_grads_in_fp32 = True
            if args.rank == 0:
                print('accumulate and all-reduce gradients in fp32 for '
                      'bfloat16 data type.', flush=True)

    if args.rank == 0:
        print('using {} for parameters ...'.format(args.params_dtype),
              flush=True)

    # If we do accumulation and all-reduces in fp32, we need to have local DDP
    # and we should make sure use-contiguous-buffers-in-local-ddp is not off.
    if args.accumulate_allreduce_grads_in_fp32:
        assert args.DDP_impl == 'local'
        assert args.use_contiguous_buffers_in_local_ddp
    else:
        if args.gradient_accumulation_fusion:
            args.gradient_accumulation_fusion = False
            if args.rank == 0:
                print('Gradient accumulation fusion to linear layer weight '
                      'gradient computation is supported only with fp32 '
                      'gradient accumulation. Setting gradient_accumulation_fusion '
                      'to False', flush=True)

    # For torch DDP, we do not use contiguous buffer
    if args.DDP_impl == 'torch':
        args.use_contiguous_buffers_in_local_ddp = False

    if args.dataloader_type is None:
        args.dataloader_type = 'single'

    # Consumed tokens.
    args.consumed_train_samples = 0
    args.consumed_valid_samples = 0

    # Iteration-based training.
    if args.train_iters:
        # If we use iteration-based training, make sure the
        # sample-based options are off.
        assert args.train_samples is None, \
            'expected iteration-based training'
        assert args.lr_decay_samples is None, \
            'expected iteration-based learning rate decay'
        assert args.lr_warmup_samples == 0, \
            'expected iteration-based learning rate warmup'
        assert args.rampup_batch_size is None, \
            'expected no batch-size rampup for iteration-based training'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_iters == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-iters'

    # Sample-based training.
    if args.train_samples:
        # If we use sample-based training, make sure the
        # iteration-based options are off.
        assert args.train_iters is None, \
            'expected sample-based training'
        assert args.lr_decay_iters is None, \
            'expected sample-based learning rate decay'
        # Typo fix in the assertion message: 'learnig' -> 'learning'.
        assert args.lr_warmup_iters == 0, \
            'expected sample-based learning rate warmup'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_samples == 0, \
                'can only specify one of lr-warmup-fraction ' \
                'and lr-warmup-samples'

    # Check required arguments.
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads',
                     'max_position_embeddings']
    for req_arg in required_args:
        _check_arg_is_not_none(args, req_arg)

    # Checks.
    if args.ffn_hidden_size is None:
        args.ffn_hidden_size = 4 * args.hidden_size

    if args.kv_channels is None:
        assert args.hidden_size % args.num_attention_heads == 0
        args.kv_channels = args.hidden_size // args.num_attention_heads

    if args.seq_length is not None:
        assert args.encoder_seq_length is None
        args.encoder_seq_length = args.seq_length
    else:
        assert args.encoder_seq_length is not None
        args.seq_length = args.encoder_seq_length

    if args.seq_length is not None:
        assert args.max_position_embeddings >= args.seq_length
    if args.decoder_seq_length is not None:
        assert args.max_position_embeddings >= args.decoder_seq_length
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None
    # Mixed precision checks.
    if args.fp16_lm_cross_entropy:
        assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
    if args.fp32_residual_connection:
        assert args.fp16 or args.bf16, \
            'residual connection in fp32 only supported when using fp16 or bf16.'

    if args.weight_decay_incr_style == 'constant':
        assert args.start_weight_decay is None
        assert args.end_weight_decay is None
        args.start_weight_decay = args.weight_decay
        args.end_weight_decay = args.weight_decay
    else:
        assert args.start_weight_decay is not None
        assert args.end_weight_decay is not None

    TORCH_MAJOR = int(torch.__version__.split('.')[0])
    TORCH_MINOR = int(torch.__version__.split('.')[1])
    # Persistent fused layer norm.
    if TORCH_MAJOR < 1 or (TORCH_MAJOR == 1 and TORCH_MINOR < 11):
        args.no_persist_layer_norm = True
        if args.rank == 0:
            print('Persistent fused layer norm kernel is supported from '
                  'pytorch v1.11 (nvidia pytorch container paired with v1.11). '
                  'Defaulting to no_persist_layer_norm=True')

    # Activation recomputing.
    if args.distribute_saved_activations:
        assert args.tensor_model_parallel_size > 1, 'can distribute ' \
            'recomputed activations only across tensor model ' \
            'parallel groups'
        assert args.recompute_granularity == 'full', \
            'distributed recompute activations is only '\
            'applicable to full recompute granularity'
        assert args.recompute_method is not None, \
            'for distributed recompute activations to work you '\
            'need to use a recompute method '
        # Bug fix: the original check `TORCH_MAJOR >= 1 and TORCH_MINOR >= 10`
        # wrongly rejects e.g. torch 2.5 (major 2, minor 5 < 10). Compare the
        # version tuple instead.
        assert (TORCH_MAJOR, TORCH_MINOR) >= (1, 10), \
            'distributed recompute activations are supported for pytorch ' \
            'v1.10 and above (Nvidia Pytorch container >= 21.07). Current ' \
            'pytorch version is v%s.%s.' % (TORCH_MAJOR, TORCH_MINOR)

    if args.recompute_granularity == 'selective':
        assert args.recompute_method is None, \
            'recompute method is not yet supported for ' \
            'selective recomputing granularity'

    # disable async_tensor_model_parallel_allreduce when
    # model parallel memory optimization is enabled
    if args.sequence_parallel:
        args.async_tensor_model_parallel_allreduce = False

    _print_args(args)
    return args
def _print_args(args):
    """Pretty-print every parsed argument (rank 0 only).

    Each attribute of *args* is printed as ``name ... value`` with dot
    padding, sorted case-insensitively by the whole formatted line.
    """
    if args.rank != 0:
        return
    print('------------------------ arguments ------------------------',
          flush=True)
    formatted = [
        '  {} {} {}'.format(name, '.' * (48 - len(name)), getattr(args, name))
        for name in vars(args)
    ]
    for line in sorted(formatted, key=str.lower):
        print(line, flush=True)
    print('-------------------- end of arguments ---------------------',
          flush=True)
def _check_arg_is_not_none(args, arg):
    """Assert that attribute *arg* has been set on *args* (is not None)."""
    value = getattr(args, arg)
    assert value is not None, '{} argument is None'.format(arg)
def _add_inference_args(parser):
    """Register inference-related command line arguments on *parser*."""
    group = parser.add_argument_group(title='inference')
    add = group.add_argument
    add('--inference-batch-times-seqlen-threshold',
        type=int, default=512,
        help=('During inference, if batch-size times '
              'sequence-length is smaller than this threshold '
              'then we will not use pipelining, otherwise we will.'))
    return parser
def _add_network_size_args(parser):
    """Register network-size command line arguments on *parser*.

    Fixes several user-facing help-string defects in the original: typos
    ('Tansformer', 'residula', 'efficieny') and missing spaces where adjacent
    string literals were concatenated without a separator ('value.This',
    'optionshould', 'compatibilityreasons').
    """
    group = parser.add_argument_group(title='network size')
    group.add_argument('--num-layers', type=int, default=None,
                       help='Number of transformer layers.')
    group.add_argument('--hidden-size', type=int, default=None,
                       help='Transformer hidden size.')
    group.add_argument('--ffn-hidden-size', type=int, default=None,
                       help='Transformer Feed-Forward Network hidden size. '
                       'This is set to 4*hidden-size if not provided')
    group.add_argument('--num-attention-heads', type=int, default=None,
                       help='Number of transformer attention heads.')
    group.add_argument('--kv-channels', type=int, default=None,
                       help='Projection weights dimension in multi-head '
                       'attention. This is set to '
                       'args.hidden_size // args.num_attention_heads '
                       'if not provided.')
    group.add_argument('--max-position-embeddings', type=int, default=None,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value. '
                       'This is added for computational efficiency reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use original BERT residual connection '
                       'ordering.')
    group.add_argument('--openai-gelu', action='store_true',
                       help="Use OpenAI's GeLU implementation. This option "
                       'should not be used unless for backward compatibility '
                       'reasons.')
    # NOTE(review): type=bool is an argparse footgun — any non-empty string
    # (including "False") parses truthy. Kept for interface compatibility.
    group.add_argument('--onnx-safe', type=bool, required=False,
                       help='Use workarounds for known problems with '
                       'Torch ONNX exporter')
    group.add_argument('--bert-no-binary-head', action='store_false',
                       help='Disable BERT binary head.',
                       dest='bert_binary_head')
    group.add_argument('--num-experts', type=int, default=None,
                       help='Number of Experts in Switch Transformer (None means no Switch)')
    return parser
def _add_logging_args(parser):
    """Register logging/tensorboard command line arguments on *parser*.

    Fixes: mis-encoded curly quotes in the queue-size help text, and adds a
    correctly spelled alias for the historical misspelled flag
    ``--no-log-learnig-rate-to-tensorboard`` (kept for backward
    compatibility; both spellings set the same destination).
    """
    group = parser.add_argument_group(title='logging')
    group.add_argument('--log-params-norm', action='store_true',
                       help='If set, calculate and log parameters norm.')
    group.add_argument('--log-num-zeros-in-grad', action='store_true',
                       help='If set, calculate and log the number of zeros in gradient.')
    group.add_argument('--tensorboard-log-interval', type=int, default=1,
                       help='Report to tensorboard interval.')
    group.add_argument('--tensorboard-queue-size', type=int, default=1000,
                       help='Size of the tensorboard queue for pending events '
                       "and summaries before one of the 'add' calls forces a "
                       'flush to disk.')
    group.add_argument('--log-timers-to-tensorboard', action='store_true',
                       help='If set, write timers to tensorboard.')
    group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
                       help='If set, write batch-size to tensorboard.')
    group.add_argument('--no-log-learnig-rate-to-tensorboard',
                       '--no-log-learning-rate-to-tensorboard',
                       action='store_false',
                       help='Disable learning rate logging to tensorboard.',
                       dest='log_learning_rate_to_tensorboard')
    group.add_argument('--no-log-loss-scale-to-tensorboard',
                       action='store_false',
                       help='Disable loss-scale logging to tensorboard.',
                       dest='log_loss_scale_to_tensorboard')
    group.add_argument('--log-validation-ppl-to-tensorboard',
                       action='store_true',
                       help='If set, write validation perplexity to '
                       'tensorboard.')
    group.add_argument('--log-memory-to-tensorboard',
                       action='store_true',
                       help='Enable memory logging to tensorboard.')
    group.add_argument('--log-world-size-to-tensorboard',
                       action='store_true',
                       help='Enable world size logging to tensorboard.')
    return parser
def _add_regularization_args(parser):
    """Register regularization/optimizer command line arguments on *parser*.

    Fixes a missing space in the --adam-eps help text, where adjacent string
    literals concatenated to 'improvenumerical stability'.
    """
    group = parser.add_argument_group(title='regularization')
    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='Post attention dropout probability.')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='Dropout probability for hidden state transformer.')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='Weight decay coefficient for L2 regularization.')
    group.add_argument('--start-weight-decay', type=float,
                       help='Initial weight decay coefficient for L2 regularization.')
    group.add_argument('--end-weight-decay', type=float,
                       help='End of run weight decay coefficient for L2 regularization.')
    group.add_argument('--weight-decay-incr-style', type=str, default='constant',
                       choices=['constant', 'linear', 'cosine'],
                       help='Weight decay increment function.')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='Gradient clipping based on global L2 norm.')
    group.add_argument('--adam-beta1', type=float, default=0.9,
                       help='First coefficient for computing running averages '
                       'of gradient and its square')
    group.add_argument('--adam-beta2', type=float, default=0.999,
                       help='Second coefficient for computing running averages '
                       'of gradient and its square')
    group.add_argument('--adam-eps', type=float, default=1e-08,
                       help='Term added to the denominator to improve '
                       'numerical stability')
    group.add_argument('--sgd-momentum', type=float, default=0.9,
                       help='Momentum factor for sgd')
    return parser
def _add_training_args(parser):
group = parser.add_argument_group(title='training')
group.add_argument('--micro-batch-size', type=int, default=None,
help='Batch size per model instance (local batch size). '
'Global batch size is local batch size times data '
'parallel size times number of micro batches.')
group.add_argument('--batch-size', type=int, default=None,
help='Old batch size parameter, do not use. '
'Use --micro-batch-size instead')
group.add_argument('--global-batch-size', type=int, default=None,
help='Training batch size. If set, it should be a '
'multiple of micro-batch-size times data-parallel-size. '
'If this value is None, then '
'use micro-batch-size * data-parallel-size as the '
'global batch size. This choice will result in 1 for '
'number of micro-batches.')
group.add_argument('--rampup-batch-size', nargs='*', default=None,
help='Batch size ramp up with the following values:'
' --rampup-batch-size <start batch size> '
' <batch size incerement> '
' <ramp-up samples> '
'For example:'
' --rampup-batch-size 16 8 300000 \ '
' --global-batch-size 1024'
'will start with global batch size 16 and over '
' (1024 - 16) / 8 = 126 intervals will increase'
'the batch size linearly to 1024. In each interval'
'we will use approximately 300000 / 126 = 2380 samples.')
group.add_argument('--recompute-activations', action='store_true',
help='recompute activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--recompute-granularity', type=str, default=None,
choices=['full', 'selective'],
help='Checkpoint activations to allow for training '
'with larger models, sequences, and batch sizes. '
'It is supported at two granularities 1) full: '
'whole transformer layer is recomputed, '
'2) selective: core attention part of the transformer '
'layer is recomputed.')
group.add_argument('--distribute-saved-activations',
action='store_true',
help='If set, distribute recomputed activations '
'across model parallel group.')
group.add_argument('--recompute-method', type=str, default=None,
choices=['uniform', 'block'],
help='1) uniform: uniformly divide the total number of '
'Transformer layers and recompute the input activation of '
'each divided chunk at specified granularity, '
'2) recompute the input activations of only a set number of '
'individual Transformer layers per pipeline stage and do the '
'rest without any recomputing at specified granularity'
'default) do not apply activations recompute to any layers')
group.add_argument('--recompute-num-layers', type=int, default=1,
help='1) uniform: the number of Transformer layers in each '
'uniformly divided recompute unit, '
'2) block: the number of individual Transformer layers '
'to recompute within each pipeline stage.')
# deprecated
group.add_argument('--checkpoint-activations', action='store_true',
help='Checkpoint activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--train-iters', type=int, default=None,
help='Total number of iterations to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-samples', type=int, default=None,
help='Total number of samples to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--log-interval', type=int, default=100,
help='Report loss and timing interval.')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after the iteration is divisible '
'by this value.')
group.add_argument('--exit-duration-in-mins', type=int, default=None,
help='Exit the program after this many minutes.')
group.add_argument('--tensorboard-dir', type=str, default=None,
help='Write TensorBoard logs to this directory.')
group.add_argument('--no-masked-softmax-fusion',
action='store_false',
help='Disable fusion of query_key_value scaling, '
'masking, and softmax.',
dest='masked_softmax_fusion')
group.add_argument('--no-bias-gelu-fusion', action='store_false',
help='Disable bias and gelu fusion.',
dest='bias_gelu_fusion')
group.add_argument('--no-bias-dropout-fusion', action='store_false',
help='Disable bias and dropout fusion.',
dest='bias_dropout_fusion')
group.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'sgd'],
help='Optimizer function')
group.add_argument('--dataloader-type', type=str, default=None,
choices=['single', 'cyclic'],
help='Single pass vs multiple pass data loader')
group.add_argument('--no-async-tensor-model-parallel-allreduce',
action='store_true',
help='Disable asynchronous execution of '
'tensor-model-parallel all-reduce with weight '
'gradient compuation of a column-linear layer.',
dest='async_tensor_model_parallel_allreduce')
group.add_argument('--no-persist-layer-norm', action='store_true',
help='Disable using persistent fused layer norm kernel. '
'This kernel supports only a set of hidden sizes. Please '
'check persist_ln_hidden_sizes if your hidden '
'size is supported.')
group.add_argument('--sequence-parallel', action='store_true',
help='Enable sequence parallel optimization.')
group.add_argument('--no-gradient-accumulation-fusion',
action='store_false',
help='Disable fusing gradient accumulation to weight '
'gradient computation of linear layers',
dest='gradient_accumulation_fusion')
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
group = parser.add_argument_group(title='learning rate')
group.add_argument('--lr', type=float, default=None,
help='Initial learning rate. Depending on decay style '
'and initial warmup, the learing rate at each '
'iteration would be different.')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine'],
help='Learning rate decay function.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay learning rate over,'
' If None defaults to `--train-iters`')
group.add_argument('--lr-decay-samples', type=int, default=None,
help='number of samples to decay learning rate over,'
' If None defaults to `--train-samples`')
group.add_argument('--lr-warmup-fraction', type=float, default=None,
help='fraction of lr-warmup-(iters/samples) to use '
'for warmup (as a float)')
group.add_argument('--lr-warmup-iters', type=int, default=0,
help='number of iterations to linearly warmup '
'learning rate over.')
group.add_argument('--lr-warmup-samples', type=int, default=0,
help='number of samples to linearly warmup '
'learning rate over.')
group.add_argument('--warmup', type=int, default=None,
help='Old lr warmup argument, do not use. Use one of the'
'--lr-warmup-* arguments above')
group.add_argument('--min-lr', type=float, default=0.0,
help='Minumum value for learning rate. The scheduler'
'clip values below this threshold.')
group.add_argument('--override-lr-scheduler', action='store_true',
help='Reset the values of the scheduler (learning rate,'
'warmup iterations, minimum learning rate, maximum '
'number of iterations, and decay style from input '
'arguments and ignore values from checkpoints. Note'
'that all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
help='Use checkpoint to set the values of the scheduler '
'(learning rate, warmup iterations, minimum learning '
'rate, maximum number of iterations, and decay style '
'from checkpoint and ignore input arguments.')
return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title='mixed precision')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode.')
group.add_argument('--bf16', action='store_true',
help='Run model in bfloat16 mode.')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic'
'loss scaling is used.')
group.add_argument('--initial-loss-scale', type=float, default=2**32,
help='Initial loss-scale for dynamic loss scaling.')
group.add_argument('--min-loss-scale', type=float, default=1.0,
help='Minimum loss scale for dynamic loss scale.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale.')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--fp32-residual-connection', action='store_true',
help='Move residual connections to fp32.')
group.add_argument('--no-query-key-layer-scaling', action='store_false',
help='Do not scale Q * K^T by 1 / layer-number.',
dest='apply_query_key_layer_scaling')
group.add_argument('--attention-softmax-in-fp32', action='store_true',
help='Run attention masking and softmax in fp32. '
'This flag is ignored unless '
'--no-query-key-layer-scaling is specified.')
group.add_argument('--accumulate-allreduce-grads-in-fp32',
action='store_true',
help='Gradient accumulation and all-reduce in fp32.')
group.add_argument('--fp16-lm-cross-entropy', action='store_true',
help='Move the cross entropy unreduced loss calculation'
'for lm head to fp16.')
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title='distributed')
group.add_argument('--tensor-model-parallel-size', type=int, default=1,
help='Degree of tensor model parallelism.')
group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
help='Degree of pipeline model parallelism.')
group.add_argument('--pipeline-model-parallel-split-rank',
type=int, default=None,
help='Rank where encoder and decoder should be split.')
group.add_argument('--model-parallel-size', type=int, default=None,
help='Old model parallel argument, do not use. Use '
'--tensor-model-parallel-size instead.')
group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
help='Number of layers per virtual pipeline stage')
group.add_argument('--distributed-backend', default='nccl',
choices=['nccl', 'gloo'],
help='Which backend to use for distributed training.')
group.add_argument('--DDP-impl', default='local',
choices=['local', 'torch'],
help='which DistributedDataParallel implementation '
'to use.')
group.add_argument('--no-contiguous-buffers-in-local-ddp',
action='store_false', help='If set, dont use '
'contiguous buffer in local DDP.',
dest='use_contiguous_buffers_in_local_ddp')
group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
help='Use scatter/gather to optimize communication of tensors in pipeline',
dest='scatter_gather_tensors_in_pipeline')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--lazy-mpu-init', type=bool, required=False,
help='If set to True, initialize_megatron() '
'skips DDP initialization and returns function to '
'complete it instead.Also turns on '
'--use-cpu-initialization flag. This is for '
'external DDP manager.' )
group.add_argument('--use-cpu-initialization', action='store_true',
default=None, help='If set, affine parallel weights '
'initialization uses CPU' )
group.add_argument('--empty-unused-memory-level', default=0, type=int,
choices=[0, 1, 2],
help='Call torch.cuda.empty_cache() each iteration '
'(training and eval), to reduce fragmentation.'
'0=off, 1=moderate, 2=aggressive.')
group.add_argument('--standalone-embedding-stage', action='store_true',
default=False, help='If set, *input* embedding layer '
'is placed on its own pipeline stage, without any '
'transformer layers. (For T5, this flag currently only '
'affects the encoder embedding.)')
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
help='Number of iterations to run for evaluation'
'validation/test for.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
group.add_argument('--data-path', nargs='*', default=None,
help='Path to the training dataset. Accepted format:'
'1) a single data path, 2) multiple datasets in the'
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default='969, 30, 1',
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
help='Maximum encoder sequence length to process.'
'This should be exclusive of --seq-length')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
help='Intervals over which check for autoresume'
'termination signal')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
# network size
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
# checkpointing
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
help='Directory containing an BertModel checkpoint '
'(needed to start ICT and REALM)')
# data
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
help='Path to Wikipedia Evidence frm DPR paper')
# training
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
# faiss index
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
# indexer
group.add_argument('--indexer-batch-size', type=int, default=128,
help='How large of batches to use when doing indexing '
'jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vision_args(parser):
group = parser.add_argument_group(title="vision")
# general vision arguments
group.add_argument('--num-classes', type=int, default=1000,
help='num of classes in vision classificaiton task')
group.add_argument('--img-h', type=int, default=224,
help='Image height for vision classification task')
group.add_argument('--img-w', type=int, default=224,
help='Image height for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
help='patch dimension')
group.add_argument('--classes-fraction', type=float, default=1.0,
help='training with fraction of classes.')
group.add_argument('--data-per-class-fraction', type=float, default=1.0,
help='training with fraction of data per class.')
group.add_argument('--no-data-sharding', action='store_false',
help='Disable data sharding.',
dest='data_sharding')
group.add_argument('--head-lr-mult', type=float, default=1.0,
help='learning rate multiplier for head during finetuning')
# pretraining type and backbone selection`
group.add_argument('--vision-pretraining', action='store_true',
help='flag to indicate vision pretraining')
group.add_argument('--vision-pretraining-type', type=str, default='classify',
choices=['classify', 'inpaint', 'dino'],
help='pretraining objectives')
group.add_argument('--vision-backbone-type', type=str, default='vit',
choices=['vit', 'mit', 'swin'],
help='backbone types types')
group.add_argument('--swin-backbone-type', type=str, default='tiny',
choices=['tiny', 'base', 'h3'],
help='pretraining objectives')
# inpainting arguments
group.add_argument('--mask-type', type=str, default='random',
choices=['random', 'row'],
help='mask types')
group.add_argument('--mask-factor', type=float, default=1.0,
help='mask size scaling parameter')
# dino arguments
group.add_argument('--iter-per-epoch', type=int, default=1250,
help='iterations per epoch')
group.add_argument('--dino-local-img-size', type=int, default=96,
help='Image size for vision classification task')
group.add_argument('--dino-local-crops-number', type=int, default=10,
help='Number of local crops')
group.add_argument('--dino-head-hidden-size', type=int, default=2048,
help='Hidden dimension size in dino head')
group.add_argument('--dino-bottleneck-size', type=int, default=256,
help='Bottle neck dimension in dino head ')
group.add_argument('--dino-freeze-last-layer', type=float, default=1,
help='Freezing last layer weights')
group.add_argument('--dino-norm-last-layer', action='store_true',
help='Disable Norm in last layer.')
group.add_argument('--dino-warmup-teacher-temp', type=float, default=0.04,
help='warump teacher temperature')
group.add_argument('--dino-teacher-temp', type=float, default=0.07,
help='teacher temperature')
group.add_argument('--dino-warmup-teacher-temp-epochs', type=int, default=30,
help='warmup teacher temperaure epochs')
return parser
|
apex-master
|
apex/transformer/testing/arguments.py
|
apex-master
|
apex/transformer/testing/__init__.py
|
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import datetime
import os
import random
from typing import Optional, Union, List, Tuple, Callable, Dict
import numpy
import torch
import torch.nn as nn
from apex import transformer
from apex.transformer.tensor_parallel import(
ColumnParallelLinear,
RowParallelLinear,
scatter_to_sequence_parallel_region,
)
from apex.transformer.pipeline_parallel.utils import (
average_losses_across_data_parallel_group,
)
from apex.transformer.pipeline_parallel.schedules.common import (
Batch,
)
from apex.transformer.testing import global_vars
from apex.transformer._ucc_util import HAS_UCC
# Canonical success marker printed/grepped for by the distributed tests.
TEST_SUCCESS_MESSAGE = ">> passed the test :-)"
# note (mkozuki): `pre_process` and `post_process` are a placeholder until interleaving schedule test comes.
class MyLayer(nn.Module):
    """Minimal building block: a single square linear layer.

    `pre_process`/`post_process` are stored but unused placeholders kept for
    pipeline-parallel interface compatibility.
    """

    def __init__(self, hidden_size: int, pre_process: bool, post_process: bool):
        super().__init__()
        self.layer = nn.Linear(hidden_size, hidden_size)
        self.pre_process = pre_process
        self.post_process = post_process

    def forward(self, x):
        """Apply the linear layer to *x*."""
        return self.layer(x)
class MyModel(nn.Module):
    """Toy model wrapping a single MyLayer.

    Implements the pipeline-parallel model API: ``set_input_tensor`` stashes
    the activation from the previous stage, and ``forward`` prefers that
    stashed tensor over its direct argument.
    """

    def __init__(
        self,
        hidden_size: int, pre_process: bool = False, post_process: bool = False,
        *,
        add_encoder: bool = False, add_decoder: bool = False,
    ) -> None:
        super().__init__()
        self.pre_process = pre_process
        self.post_process = post_process
        self.layer = MyLayer(
            hidden_size=hidden_size, pre_process=pre_process, post_process=post_process
        )
        self.input_tensor = None

    def set_input_tensor(
        self, input_tensor: Union[torch.Tensor, List[torch.Tensor]]
    ) -> None:
        """Stash the tensor forwarded from the previous pipeline stage."""
        tensors = input_tensor if isinstance(input_tensor, list) else [input_tensor]
        self.input_tensor = tensors[0]

    def forward(self, x: Optional[torch.Tensor]) -> torch.Tensor:
        # Prefer the pipeline-injected tensor over the direct argument.
        source = x if self.input_tensor is None else self.input_tensor
        return self.layer(source)
class ToyParallelMLP(nn.Module):
    """Simplified two-layer tensor-parallel MLP (h -> 4h -> GELU -> h).

    Implements the pipeline-parallel model API (`set_input_tensor` plus a
    forward that prefers the injected tensor over its argument).
    """
    def __init__(
        self,
        hidden_size: int, pre_process: bool = False, post_process: bool = False,
        *,
        sequence_parallel_enabled: bool = False,
        # TODO(mkozuki): Support these two?
        add_encoder: bool = False, add_decoder: bool = False,
    ) -> None:
        super().__init__()
        self.pre_process = pre_process
        self.post_process = post_process
        self.sequence_parallel_enabled = sequence_parallel_enabled
        # Bug fix: initialize `input_tensor` here (as MyModel does) so that
        # `forward` cannot raise AttributeError when called before
        # `set_input_tensor`, e.g. on the first pipeline stage.
        self.input_tensor = None
        ffn_hidden_size = 4 * hidden_size
        self.dense_h_to_4h = ColumnParallelLinear(
            hidden_size,
            ffn_hidden_size,
            gather_output=False,
            # init_method=init_method,
            skip_bias_add=True,
            # use_cpu_initialization=use_cpu_initialization,
            bias=True,
            sequence_parallel_enabled=sequence_parallel_enabled,
            no_async_tensor_model_parallel_allreduce=True,
        )
        self.dense_4h_to_h = RowParallelLinear(
            ffn_hidden_size,
            hidden_size,
            input_is_parallel=True,
            # init_method=output_layer_init_method,
            skip_bias_add=False,
            # use_cpu_initialization=use_cpu_initialization,
            bias=True,
            sequence_parallel_enabled=sequence_parallel_enabled,
        )
        self.activation_func = torch.nn.GELU()

    def set_input_tensor(
        self,
        input_tensor: Union[torch.Tensor, List[torch.Tensor]],
    ) -> None:
        """Stash the activation forwarded from the previous pipeline stage."""
        if not isinstance(input_tensor, list):
            input_tensor = [input_tensor]
        self.input_tensor = input_tensor[0]

    def forward(
        self,
        x: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """Forward of Simplified ParallelMLP.

        Args:
            x: :obj:`None` if pipeline rank != pipeline first rank. When :obj:`None`,
                `self.input_tensor` is taken care of by `forward_step` defined in
                apex/transformer/pipeline_parallel/schedules/common.py
        """
        # [s, b, h]
        if self.input_tensor is None:
            input = x
        else:
            input = self.input_tensor
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(input)
        # skip_bias_add=True returns the bias separately; apply it here.
        if bias_parallel is not None:
            intermediate_parallel += bias_parallel
        intermediate_parallel = self.activation_func(intermediate_parallel)
        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output
def model_provider_func(
    hidden_size: int,
    pre_process: bool,
    post_process: bool,
    *,
    add_encoder: bool = False,
    add_decoder: bool = False) -> MyModel:
    """Build a MyModel; signature mirrors Megatron's model-provider hook."""
    return MyModel(
        hidden_size,
        pre_process,
        post_process,
        add_encoder=add_encoder,
        add_decoder=add_decoder,
    )
def mlp_provider_func(
    hidden_size: int,
    pre_process: bool,
    post_process: bool,
    *,
    add_encoder: bool = False,
    add_decoder: bool = False,
    sequence_parallel_enabled: bool = False,
) -> ToyParallelMLP:
    """Build a ToyParallelMLP; model-provider hook for pipeline tests."""
    return ToyParallelMLP(
        hidden_size, pre_process, post_process,
        add_encoder=add_encoder,
        add_decoder=add_decoder,
        sequence_parallel_enabled=sequence_parallel_enabled)
def process_batch(batch):
    """Unwrap a batch: a list yields its first element, anything else passes through."""
    return batch[0] if isinstance(batch, list) else batch
def fwd_step_func(batch, model):
    """Forward step returning ``(output, loss_func)`` as the pipeline engine expects."""
    y = model(process_batch(batch))

    # note (mkozuki): I don't think this function is nice but I do think this is enough for now
    # just to check the sanity of ported pipeline functions.
    def loss_func(out):
        loss = torch.sum(out)
        averaged_loss = average_losses_across_data_parallel_group([loss])
        return loss, {"avg": averaged_loss}

    return y, loss_func
@dataclass(frozen=True)
class ToyParallelMLPFwdBwdStepFunc:
    """Forward-step callable for ToyParallelMLP tests.

    Transposes tensor batches to [s, b, ...] and, when enabled, scatters them
    across the sequence-parallel region before the forward pass.
    """

    sequence_parallel_enabled: bool

    def __call__(
        self,
        batch: Batch,
        model: torch.nn.Module,
    ) -> Tuple[torch.Tensor, Callable[[torch.Tensor], Tuple[torch.Tensor, Dict[str, torch.Tensor]]]]:
        if isinstance(batch, list):
            x = batch[0]
        else:
            x = batch
        if isinstance(x, torch.Tensor):
            # [b, s, ...] -> [s, b, ...]
            x = x.transpose(0, 1).contiguous()
            if self.sequence_parallel_enabled:
                x = scatter_to_sequence_parallel_region(x)
        y = model(x)

        # note (mkozuki): I don't think this function is nice but I do think this is enough for now
        # just to check the sanity of ported pipeline functions.
        def loss_func(out):
            loss = torch.sum(out)
            averaged_loss = average_losses_across_data_parallel_group([loss])
            return loss, {"avg": averaged_loss}

        return y, loss_func
class IdentityLayer(torch.nn.Module):
    """Module holding one learnable tensor; ``forward()`` returns it unchanged."""

    def __init__(self, size, scale=1.0):
        super().__init__()
        self.weight = torch.nn.Parameter(scale * torch.randn(size))

    def forward(self):
        """Return the stored parameter."""
        return self.weight
def set_random_seed(seed):
    """Set random seed for reproducibility across python, numpy, torch,
    and the tensor-model-parallel CUDA RNG tracker."""
    for seed_fn in (
        random.seed,
        numpy.random.seed,
        torch.manual_seed,
        transformer.tensor_parallel.model_parallel_cuda_manual_seed,
    ):
        seed_fn(seed)
def initialize_distributed(backend="nccl"):
    """Initialize torch.distributed from RANK/WORLD_SIZE/MASTER_* env vars.

    Args:
        backend: process-group backend; only "nccl" and "ucc" are accepted.

    Raises:
        RuntimeError: if *backend* is neither "nccl" nor "ucc".
        ImportError: if "ucc" is requested but this PyTorch build lacks UCC.
    """
    # Get local rank in case it is provided.
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--local_rank', type=int, default=None,
    #                    help='local rank passed from distributed launcher')
    # args = parser.parse_args()
    if backend not in ("nccl", "ucc"):
        raise RuntimeError(f"Currently only nccl & ucc are supported but {backend}")
    if backend == "ucc":
        if not HAS_UCC:
            raise ImportError("UCC backend requires pytorch source build with UCC installed and enabled")
    # Local rank comes from the already-parsed global args (see global_vars),
    # not from a fresh parser.
    args = global_vars.get_args()
    local_rank = args.local_rank
    # Get rank and world size.
    rank = int(os.getenv("RANK", "0"))
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    print(
        "> initializing torch.distributed with local rank: {}, "
        "rank: {}, world size: {}".format(local_rank, rank, world_size)
    )
    # Set the device id.
    # Default is round-robin by global rank; an explicit launcher-provided
    # local_rank takes precedence. The device must be selected BEFORE
    # init_process_group so communicators bind to the right GPU.
    device = rank % torch.cuda.device_count()
    if local_rank is not None:
        device = local_rank
    torch.cuda.set_device(device)
    # Call the init process.
    # Rendezvous over TCP at MASTER_ADDR:MASTER_PORT (defaults localhost:6000).
    init_method = "tcp://"
    master_ip = os.getenv("MASTER_ADDR", "localhost")
    master_port = os.getenv("MASTER_PORT", "6000")
    init_method += master_ip + ":" + master_port
    torch.distributed.init_process_group(
        backend=backend, world_size=world_size, rank=rank, init_method=init_method,
        timeout=datetime.timedelta(seconds=60),
    )
def print_separator(message):
    """On rank 0, print *message* centered in a 78-column dashed rule."""
    pad = "-" * ((78 - len(message)) // 2)
    line = "\n" + pad + " {} ".format(message) + pad
    if torch.distributed.get_rank() == 0:
        print(line, flush=True)
|
apex-master
|
apex/transformer/testing/commons.py
|
import contextlib
import torch
from apex.transformer import tensor_parallel
from apex.transformer.enums import AttnMaskType
from apex.transformer.enums import ModelType
from apex.transformer.layers import FusedLayerNorm as LayerNorm
from apex.transformer.testing.global_vars import get_args
from apex.transformer.testing.standalone_transformer_lm import (
MegatronModule,
get_language_model,
get_linear_layer,
init_method_normal,
scaled_init_method_normal,
parallel_lm_logits,
)
def bert_extended_attention_mask(attention_mask):
    """Expand a [b, s] padding mask into a [b, 1, s, s] boolean mask.

    The returned mask is True at positions that should be masked OUT
    (i.e. where either token of the pair is padding).
    """
    # Outer product of the per-token mask with itself: [b, s, s].
    pairwise = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
    # Insert the broadcastable head dimension: [b, 1, s, s].
    extended = pairwise.unsqueeze(1)
    # Convert attention mask to binary: True marks disallowed attention.
    return extended < 0.5
def bert_position_ids(token_ids):
    """Return [b, s] position ids 0..s-1 on the same device as *token_ids*."""
    positions = torch.arange(
        token_ids.size(1), dtype=torch.long, device=token_ids.device)
    # Broadcast the single row across the batch dimension.
    return positions.unsqueeze(0).expand_as(token_ids)
class BertLMHead(MegatronModule):
    """Masked LM head for Bert

    Transforms hidden states (dense -> gelu -> layernorm) and projects them
    onto the vocabulary via `parallel_lm_logits`.

    Arguments:
        mpu_vocab_size: model parallel size of vocabulary.
        hidden_size: hidden size
        init_method: init method for weight initialization
        layernorm_epsilon: tolerance for layer norm divisions
        parallel_output: whether output logits being distributed or not.
    """
    def __init__(self, mpu_vocab_size, hidden_size, init_method,
                 layernorm_epsilon, parallel_output):
        super(BertLMHead, self).__init__()
        args = get_args()
        # Output bias over the (model-parallel shard of the) vocabulary.
        self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
        # TODO: do we need this?
        # mpu.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
        self.parallel_output = parallel_output
        self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
        # Mark parameters for sequence-parallel handling downstream.
        setattr(self.dense.weight, 'sequence_parallel', args.sequence_parallel)
        setattr(self.dense.bias, 'sequence_parallel', args.sequence_parallel)
        self.layernorm = LayerNorm(
            hidden_size, eps=layernorm_epsilon, sequence_parallel_enabled=args.sequence_parallel)
        self.gelu = torch.nn.functional.gelu
        # NOTE(review): `openai_gelu` and `erf_gelu` are not imported in this
        # module; these branches would raise NameError if args.openai_gelu or
        # args.onnx_safe is set — confirm against upstream Megatron imports.
        if args.openai_gelu:
            self.gelu = openai_gelu
        elif args.onnx_safe:
            self.gelu = erf_gelu
    def forward(self, hidden_states, word_embeddings_weight):
        # [s, b, h] -> transformed hidden states.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.gelu(hidden_states)
        hidden_states = self.layernorm(hidden_states)
        # Project onto the vocabulary (optionally keeping logits distributed).
        output = parallel_lm_logits(hidden_states,
                                    word_embeddings_weight,
                                    self.parallel_output,
                                    bias=self.bias)
        return output
def post_language_model_processing(lm_output, pooled_output,
                                   lm_head, binary_head,
                                   lm_labels,
                                   logit_weights,
                                   fp16_lm_cross_entropy):
    """Apply the BERT output heads and either return logits or the LM loss.

    When *lm_labels* is None, returns ``(lm_logits [b, s, h], binary_logits)``
    for inference; otherwise returns ``(lm_loss [s, b], binary_logits)``.
    *binary_logits* is None when no binary (NSP) head is supplied.
    """
    # LM logits over the vocabulary, [s, b, h].
    lm_logits = lm_head(lm_output, logit_weights)
    binary_logits = binary_head(pooled_output) if binary_head is not None else None
    if lm_labels is None:
        # Inference path: hand logits back in [b, s, h] layout.
        return lm_logits.transpose(0, 1).contiguous(), binary_logits
    # Training path: labels arrive [b, s]; flip to [s, b] to line up with logits.
    labels_sb = lm_labels.transpose(0, 1).contiguous()
    if fp16_lm_cross_entropy:
        assert lm_logits.dtype == torch.half
        lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, labels_sb)
    else:
        # Up-cast logits to fp32 for a numerically safer cross entropy.
        lm_loss = tensor_parallel.vocab_parallel_cross_entropy(
            lm_logits.float(), labels_sb)
    return lm_loss, binary_logits
class BertModel(MegatronModule):
    """Bert Language model.

    Wraps the shared transformer language model with the BERT-specific
    heads: a masked-LM head (``BertLMHead``) and, optionally, a 2-way
    binary (next-sentence-prediction) head.

    Arguments:
        num_tokentypes: number of token-type (segment) embeddings.
        add_binary_head: if True, add the binary classification head.
        parallel_output: keep logits sharded across tensor-parallel ranks.
        pre_process: this pipeline stage owns the embedding layer.
        post_process: this pipeline stage owns the output heads.
        cpu_offload: run forward under ``torch.autograd.graph.save_on_cpu``.
    """
    def __init__(self,
                 num_tokentypes=2,
                 add_binary_head=True,
                 parallel_output=True,
                 pre_process=True,
                 post_process=True,
                 cpu_offload=False):
        super(BertModel, self).__init__()
        args = get_args()
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
        self.add_binary_head = add_binary_head
        self.parallel_output = parallel_output
        self.pre_process = pre_process
        self.post_process = post_process
        init_method = init_method_normal(args.init_method_std)
        # Output-layer init std is scaled down by sqrt(2 * num_layers).
        scaled_init_method = scaled_init_method_normal(args.init_method_std,
                                                       args.num_layers)
        self.language_model, self._language_model_key = get_language_model(
            num_tokentypes=num_tokentypes,
            add_pooler=self.add_binary_head,
            encoder_attn_mask_type=AttnMaskType.padding,
            init_method=init_method,
            scaled_init_method=scaled_init_method,
            pre_process=self.pre_process,
            post_process=self.post_process)
        # Tie (or, under pipeline parallelism, mirror) word embeddings with
        # the LM head — see MegatronModule.initialize_word_embeddings.
        self.initialize_word_embeddings(init_method_normal)
        if self.post_process:
            self.lm_head = BertLMHead(
                self.word_embeddings_weight().size(0),
                args.hidden_size, init_method, args.layernorm_epsilon, parallel_output)
            self._lm_head_key = 'lm_head'
            self.binary_head = None
            if self.add_binary_head:
                # 2-way classifier fed from the pooled [CLS] output.
                self.binary_head = get_linear_layer(args.hidden_size, 2,
                                                    init_method)
                self._binary_head_key = 'binary_head'
        self.forward_context = contextlib.nullcontext
        if cpu_offload:
            # Stash forward activations on CPU to reduce GPU memory pressure.
            self.forward_context = torch.autograd.graph.save_on_cpu

    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        self.language_model.set_input_tensor(input_tensor)

    def forward(self, bert_model_input, attention_mask,
                tokentype_ids=None, lm_labels=None):
        """Run the encoder and, on the last stage, the LM/NSP heads.

        Returns the raw transformer output on intermediate stages, otherwise
        whatever ``post_language_model_processing`` produces (logits or loss).
        """
        with self.forward_context():
            extended_attention_mask = bert_extended_attention_mask(attention_mask)
            input_ids = bert_model_input
            position_ids = bert_position_ids(input_ids)
            lm_output = self.language_model(
                input_ids,
                position_ids,
                extended_attention_mask,
                tokentype_ids=tokentype_ids
            )
            # With a pooler the language model returns (sequence, pooled).
            if self.post_process and self.add_binary_head:
                lm_output, pooled_output = lm_output
            else:
                pooled_output = None
            if self.post_process:
                return post_language_model_processing(lm_output, pooled_output,
                                                      self.lm_head, self.binary_head,
                                                      lm_labels,
                                                      self.word_embeddings_weight(),
                                                      self.fp16_lm_cross_entropy)
            else:
                return lm_output

    # NOTE(mkozuki): This method is not maintained as apex only tests forward_backward with best effort.
    def state_dict_for_save_checkpoint(self, destination=None, prefix='',
                                       keep_vars=False):
        """For easy load when model is combined with other heads,
        add an extra key."""
        state_dict_ = {}
        state_dict_[self._language_model_key] \
            = self.language_model.state_dict_for_save_checkpoint(
                destination, prefix, keep_vars)
        if self.post_process:
            state_dict_[self._lm_head_key] \
                = self.lm_head.state_dict_for_save_checkpoint(
                    destination, prefix, keep_vars)
        if self.post_process and self.add_binary_head:
            state_dict_[self._binary_head_key] \
                = self.binary_head.state_dict(destination, prefix, keep_vars)
        # Save word_embeddings.
        if self.post_process and not self.pre_process:
            state_dict_[self._word_embeddings_for_head_key] \
                = self.word_embeddings.state_dict(destination, prefix, keep_vars)
        return state_dict_

    # NOTE(mkozuki): This method is not maintained as apex only tests forward_backward with best effort.
    def load_state_dict(self, state_dict, strict=True):
        """Customized load."""
        self.language_model.load_state_dict(
            state_dict[self._language_model_key], strict=strict)
        if self.post_process:
            self.lm_head.load_state_dict(
                state_dict[self._lm_head_key], strict=strict)
        if self.post_process and self.add_binary_head:
            self.binary_head.load_state_dict(
                state_dict[self._binary_head_key], strict=strict)
        # Load word_embeddings.
        if self.post_process and not self.pre_process:
            self.word_embeddings.load_state_dict(
                state_dict[self._word_embeddings_for_head_key], strict=strict)
def bert_model_provider(pre_process=True, post_process=True, cpu_offload=False):
    """Build a ``BertModel`` configured from the global test args.

    The binary (NSP) head and the use of token-type embeddings are both
    driven by ``args.bert_binary_head``.
    """
    args = get_args()
    return BertModel(
        num_tokentypes=(2 if args.bert_binary_head else 0),
        add_binary_head=args.bert_binary_head,
        parallel_output=True,
        pre_process=pre_process,
        post_process=post_process,
        cpu_offload=cpu_offload,
    )
|
apex-master
|
apex/transformer/testing/standalone_bert.py
|
import os
import sys
import unittest
from packaging.version import Version, parse
import torch
from torch import distributed as dist
from torch.utils import collect_env
from torch.testing._internal import common_utils
from torch.testing._internal import common_distributed
from apex.transformer._ucc_util import HAS_UCC
# NOTE(mkozuki): Version guard for ucc. ref: https://github.com/openucx/ucc/issues/496
# Minimum NVIDIA driver version known to work with torch + UCC.
_TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION = Version("470.42.01")
# Detected driver version; stays None when CUDA is unavailable (CPU-only hosts).
_driver_version = None
if torch.cuda.is_available():
    _driver_version = parse(collect_env.get_nvidia_driver_version(collect_env.run))
# True only when a driver was detected AND it meets the minimum version above.
HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER = _driver_version is not None and _driver_version >= _TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION
class DistributedTestBase(common_distributed.MultiProcessTestCase):
    """Base class for multi-process distributed (GPU) tests.

    Subclasses must define ``DISTRIBUTED_BACKEND`` (e.g. "nccl", "ucc").
    Each test method runs in ``world_size`` spawned worker processes that
    join a process group created in ``_run``.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def setUp(self) -> None:
        super().setUp()
        # Hook for subclasses to prepare the environment (e.g. env vars)
        # before the worker processes are spawned.
        self._setup_pre_spawn()
        self._spawn_processes()

    def tearDown(self) -> None:
        # Release cached CUDA blocks so later tests start from a clean slate.
        torch.cuda.empty_cache()
        super().tearDown()

    @property
    def world_size(self) -> int:
        # Capped at 4 ranks regardless of how many GPUs are present.
        return min(torch.cuda.device_count(), 4)

    @property
    def init_method(self):
        # File-based rendezvous using the temp file MultiProcessTestCase makes.
        return f"{common_utils.FILE_SCHEMA}{self.file_name}"

    @classmethod
    def _run(cls, rank, test_name, file_name, pipe):
        # Entry point executed inside each spawned worker process.
        self = cls(test_name)
        self.assertTrue(torch.cuda.is_available())
        self.assertTrue(hasattr(self, "DISTRIBUTED_BACKEND"))
        self.rank = rank
        self.file_name = file_name
        print(f"[dist init] rank = {self.rank}, world_size = {self.world_size}")
        try:
            dist.init_process_group(
                init_method=self.init_method,
                backend=self.DISTRIBUTED_BACKEND,
                world_size=int(self.world_size),
                rank=self.rank,
            )
        except RuntimeError as e:
            # A "recompile" message means the requested backend is not built
            # into this torch; skip (exit 0) rather than fail the test.
            if "recompile" in e.args[0]:
                print(f"Backend of {self.DISTRIBUTED_BACKEND} not available")
                sys.exit(0)
            raise
        # Round-robin ranks over the visible devices.
        torch.cuda.set_device(self.rank % torch.cuda.device_count())
        dist.barrier()
        self.run_test(test_name, pipe)
        dist.barrier()
        dist.destroy_process_group()
        sys.exit(0)

    def _setup_pre_spawn(self):
        # Default: nothing to do. Subclasses may override (see the UCC variant).
        pass
class NcclDistributedTestBase(DistributedTestBase):
    """DistributedTestBase specialization that rendezvouses over NCCL."""

    # Backend name forwarded to ``dist.init_process_group``.
    DISTRIBUTED_BACKEND = "nccl"
@unittest.skipUnless(
    HAS_UCC,
    "Requires either torch ucc or pytorch build from source with native ucc installed and enabled",
)
@unittest.skipUnless(
    HAS_TORCH_UCC_COMPAT_NVIDIA_DRIVER,
    f"`torch_ucc` requires NVIDIA driver >= {_TORCH_UCC_COMPAT_NVIDIA_DRIVER_VERSION} but {_driver_version} found. "
    "See https://github.com/openucx/ucc/issues/496",
)
class UccDistributedTestBase(DistributedTestBase):
    """DistributedTestBase specialization for the UCC backend.

    Rendezvous is TCP-based here, so MASTER_ADDR / MASTER_PORT / UCX_TLS are
    set before spawning workers, and tearDown removes only the variables this
    class itself introduced.
    """

    DISTRIBUTED_BACKEND = "ucc"

    def _setup_pre_spawn(self) -> None:
        self.master_addr = "localhost"
        os.environ["MASTER_ADDR"] = "localhost"
        # Remember whether MASTER_PORT was preset so tearDown only removes
        # what this class added.
        self._has_master_port = "MASTER_PORT" in os.environ
        if self._has_master_port:
            self.master_port = os.environ["MASTER_PORT"]
        else:
            try:
                # Internal helper when available; fixed fallback port otherwise.
                from caffe2.torch.fb.common.utils import get_free_port
                self.master_port = str(get_free_port())
            except ImportError:
                self.master_port = "12375"
            os.environ["MASTER_PORT"] = self.master_port
        # Same bookkeeping for UCX_TLS (transport selection for UCX/UCC).
        self._has_ucx_tls = "UCX_TLS" in os.environ
        if not self._has_ucx_tls:
            os.environ["UCX_TLS"] = "tcp,cuda"
        print('os.environ[\"UCX_TLS\"] = {}'.format(os.environ["UCX_TLS"]))

    def tearDown(self) -> None:
        super().tearDown()
        # Only unset variables that _setup_pre_spawn introduced.
        if not self._has_master_port:
            del os.environ["MASTER_PORT"]
        if not self._has_ucx_tls:
            del os.environ["UCX_TLS"]

    @property
    def init_method(self):
        # TCP rendezvous instead of the base class's file-based scheme.
        return "tcp://localhost:" + os.environ["MASTER_PORT"]
|
apex-master
|
apex/transformer/testing/distributed_test_base.py
|
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import torch
from apex.transformer.enums import AttnMaskType
from apex.transformer.enums import ModelType
from apex.transformer import tensor_parallel
from apex.transformer.testing.global_vars import get_args
from apex.transformer.testing.standalone_transformer_lm import MegatronModule
from apex.transformer.testing.standalone_transformer_lm import parallel_lm_logits
from apex.transformer.testing.standalone_transformer_lm import post_language_model_processing
from apex.transformer.testing.standalone_transformer_lm import get_language_model
from apex.transformer.testing.standalone_transformer_lm import init_method_normal
from apex.transformer.testing.standalone_transformer_lm import (
scaled_init_method_normal,
)
def gpt_model_provider(pre_process: bool = True, post_process: bool = True, cpu_offload: bool = False,) -> "GPTModel":
    """Build a ``GPTModel`` for the forward/backward tests.

    Args:
        pre_process: this pipeline stage owns the embedding layer.
        post_process: this pipeline stage owns the output processing.
        cpu_offload: enable activation offloading to CPU.

    Returns:
        A freshly constructed ``GPTModel``.
    """
    args = get_args()
    model = GPTModel(
        num_tokentypes=0,
        parallel_output=True,
        pre_process=pre_process,
        post_process=post_process,
        # Bug fix: the ``cpu_offload`` parameter used to be silently ignored
        # in favor of ``args.cpu_offload`` (cf. ``bert_model_provider``, which
        # passes its parameter through). Honor an explicit request while
        # staying backward compatible with callers relying on the global arg.
        cpu_offload=cpu_offload or args.cpu_offload,
    )
    return model
class GPTModel(MegatronModule):
    """GPT-2 Language model.

    Causal-attention transformer LM built on the shared ``get_language_model``
    stack, with tied input/output word embeddings.
    """
    def __init__(
        self,
        num_tokentypes: int = 0,
        parallel_output: bool = True,
        pre_process: bool = True,
        post_process: bool = True,
        cpu_offload: bool = False,
    ):
        super().__init__()
        args = get_args()
        self.forward_context = contextlib.nullcontext
        if cpu_offload:
            # Stash forward activations on CPU to reduce GPU memory pressure.
            self.forward_context = torch.autograd.graph.save_on_cpu
        self.parallel_output = parallel_output
        self.pre_process = pre_process
        self.post_process = post_process
        self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
        self.language_model, self._language_model_key = get_language_model(
            num_tokentypes=num_tokentypes,
            add_pooler=False,
            encoder_attn_mask_type=AttnMaskType.causal,
            init_method=init_method_normal(args.init_method_std),
            # Output-layer init std scaled down by sqrt(2 * num_layers).
            scaled_init_method=scaled_init_method_normal(
                args.init_method_std, args.num_layers
            ),
            pre_process=self.pre_process,
            post_process=self.post_process,
        )
        # Tie (or, under pipeline parallelism, mirror) the word embeddings.
        self.initialize_word_embeddings(init_method_normal)

    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        self.language_model.set_input_tensor(input_tensor)

    def forward(
        self,
        input_ids,
        position_ids,
        attention_mask,
        labels=None,
        tokentype_ids=None,
        inference_params=None,
    ):
        """Run the decoder; on the last stage also compute logits/loss.

        NOTE(review): when ``post_process`` is True this unconditionally calls
        ``labels.t()``, so ``labels=None`` would raise on the last stage —
        presumably callers always supply labels there; confirm.
        """
        with self.forward_context():
            lm_output = self.language_model(
                input_ids, position_ids, attention_mask, inference_params=inference_params
            )
            if self.post_process:
                return post_language_model_processing(
                    lm_output,
                    # note(mkozuki): Am I overlooking some order of dim change?
                    labels.t().contiguous(),
                    self.word_embeddings_weight(),
                    self.parallel_output,
                    self.fp16_lm_cross_entropy,
                )
            else:
                return lm_output
|
apex-master
|
apex/transformer/testing/standalone_gpt.py
|
# coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT-2 model."""
import enum
import math
import contextlib
import json
import torch
import torch.nn.functional as F
import apex.transformer.utils
from apex.transformer.layers import FusedLayerNorm as LayerNorm
from apex.transformer.functional import FusedScaleMaskSoftmax
from apex.transformer import tensor_parallel
from apex.transformer.tensor_parallel.layers import ColumnParallelLinear
from apex.transformer.tensor_parallel.layers import RowParallelLinear
from apex.transformer.tensor_parallel.layers import VocabParallelEmbedding
from apex.transformer.tensor_parallel.mappings import scatter_to_sequence_parallel_region
from apex.transformer import parallel_state
from apex.transformer.testing.global_vars import get_args
from apex.transformer.enums import ModelType
from apex.transformer.enums import LayerType
from apex.transformer.enums import AttnType
from apex.transformer.enums import AttnMaskType
from apex.transformer.log_util import get_transformer_logger
_logger = get_transformer_logger(__name__)
def param_is_not_shared(param: torch.Tensor) -> bool:
    """Return True if *param* is NOT marked as shared across pipeline stages.

    A parameter counts as shared when its ``shared`` attribute exists and is
    truthy (``MegatronModule.initialize_word_embeddings`` sets
    ``weight.shared = True`` on the mirrored word embeddings).
    """
    # Bug fix: the previous version returned ``getattr(param, "shared", False)``
    # directly — i.e. it answered "is shared", the exact inverse of its name
    # (the Megatron-LM reference returns ``not param.shared``).
    return not getattr(param, "shared", False)
class MegatronModule(torch.nn.Module):
    """Megatron specific extensions of torch Module with support for pipelining."""

    def __init__(self, share_word_embeddings: bool = True) -> None:
        super().__init__()
        # When True, input and output word embeddings share (or mirror) weights.
        self.share_word_embeddings = share_word_embeddings

    def word_embeddings_weight(self):
        """Return the word-embedding weight owned by this pipeline stage.

        First stage: the language model's own embedding weight. Last stage
        (no ``pre_process``): the mirrored copy created by
        ``initialize_word_embeddings``.
        NOTE(review): relies on subclasses defining ``self.pre_process`` and
        ``self.language_model`` / ``self.word_embeddings``.
        """
        if self.pre_process:
            return self.language_model.embedding.word_embeddings.weight
        else:
            if not self.share_word_embeddings:
                raise Exception('word_embeddings_weight() called for last stage, but share_word_embeddings is false')
            return self.word_embeddings.weight

    def initialize_word_embeddings(self, init_method_normal):
        """Create/sync the duplicated word embeddings for pipeline parallelism."""
        args = get_args()
        if not self.share_word_embeddings:
            raise Exception("initialize_word_embeddings() was called but share_word_embeddings is false")
        # This function just initializes the word embeddings in the final stage
        # when we are using pipeline parallelism. Nothing to do if we aren't
        # using pipeline parallelism.
        if args.pipeline_model_parallel_size == 1:
            return
        # Parameters are shared between the word embeddings layers, and the
        # heads at the end of the model. In a pipelined setup with more than
        # one stage, the initial embedding layer and the head are on different
        # workers, so we do the following:
        # 1. Create a second copy of word_embeddings on the last stage, with
        #    initial parameters of 0.0.
        # 2. Do an all-reduce between the first and last stage to ensure that
        #    the two copies of word_embeddings start off with the same
        #    parameter values.
        # 3. In the training loop, before an all-reduce between the grads of
        #    the two word_embeddings layers to ensure that every applied weight
        #    update is the same on both stages.
        if parallel_state.is_pipeline_last_stage() and not self.pre_process:
            assert not parallel_state.is_pipeline_first_stage()
            self._word_embeddings_for_head_key = 'word_embeddings_for_head'
            # set word_embeddings weights to 0 here, then copy first
            # stage's weights using all_reduce below.
            self.word_embeddings = VocabParallelEmbedding(
                args.padded_vocab_size, args.hidden_size,
                init_method=init_method_normal(args.init_method_std))
            self.word_embeddings.weight.data.fill_(0)
            # Flag picked up by grad-sync logic (see param_is_not_shared).
            self.word_embeddings.weight.shared = True
        # Zero out initial weights for decoder embedding.
        # NOTE: We don't currently support T5 with the interleaved schedule.
        if not parallel_state.is_pipeline_first_stage(ignore_virtual=True) and self.pre_process:
            self.language_model.embedding.zero_parameters()
        # Ensure that first and last stages have the same initial parameter
        # values.
        if torch.distributed.is_initialized():
            if parallel_state.is_rank_in_embedding_group():
                torch.distributed.all_reduce(self.word_embeddings_weight(),
                                             group=parallel_state.get_embedding_group())
            # Ensure that encoder(first stage) and decoder(split stage) position
            # embeddings have the same initial parameter values
            # NOTE: We don't currently support T5 with the interleaved schedule.
            if parallel_state.is_rank_in_position_embedding_group() and \
                    args.pipeline_model_parallel_split_rank is not None:
                # TODO: Support tokentype embedding.
                self.language_model.embedding.cuda()
                position_embeddings = self.language_model.embedding.position_embeddings
                torch.distributed.all_reduce(position_embeddings.weight,
                                             group=parallel_state.get_position_embedding_group())
        else:
            print("WARNING! Distributed processes aren't initialized, so "
                  "word embeddings in the last layer are not initialized. "
                  "If you are just manipulating a model this is fine, but "
                  "this needs to be handled manually. If you are training "
                  "something is definitely wrong.")
def get_linear_layer(rows, columns, init_method):
    """Build ``torch.nn.Linear(rows, columns)`` with custom weight init.

    *init_method* initializes the weight in place; the bias is zeroed.
    """
    linear = torch.nn.Linear(rows, columns)
    init_method(linear.weight)
    # Zero the bias without recording autograd history.
    with torch.no_grad():
        linear.bias.zero_()
    return linear
# NOTE(mkozuki): Avoid inplace op.
def attention_mask_func(attention_scores: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Return a copy of *attention_scores* with masked positions set to -10000.0.

    Deliberately out-of-place (``masked_fill``, not ``masked_fill_``) so the
    caller's score tensor is left untouched.
    """
    masked_scores = attention_scores.masked_fill(attention_mask, -10000.0)
    return masked_scores
def init_method_normal(sigma):
    """Init method based on N(0, sigma).

    Returns a callable that fills its tensor argument in place and returns it.
    """
    return lambda tensor: torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers)).

    The 1/sqrt(2*num_layers) scaling keeps residual-branch outputs from
    growing with depth.
    """
    scaled_std = sigma / math.sqrt(2.0 * num_layers)
    return lambda tensor: torch.nn.init.normal_(tensor, mean=0.0, std=scaled_std)
class ParallelMLP(MegatronModule):
    """MLP.

    MLP will take the input with h hidden state, project it to 4*h
    hidden dimension, perform nonlinear transformation, and project the
    state back into h hidden dimension.
    """
    def __init__(self, init_method, output_layer_init_method):
        super().__init__()
        args = get_args()
        # Project to 4h.
        self.dense_h_to_4h = ColumnParallelLinear(
            args.hidden_size,
            args.ffn_hidden_size,
            gather_output=False,
            init_method=init_method,
            skip_bias_add=True,
            no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
            sequence_parallel_enabled=args.sequence_parallel,
        )
        # NOTE(review): stored but never consulted below — forward always
        # applies the unfused F.gelu path regardless of this flag; confirm.
        self.bias_gelu_fusion = args.bias_gelu_fusion
        self.activation_func = F.gelu
        # Project back to h.
        self.dense_4h_to_h = RowParallelLinear(
            args.ffn_hidden_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True,
            sequence_parallel_enabled=args.sequence_parallel,
        )

    def forward(self, hidden_states):
        """Apply the 4x up-projection, GELU, and down-projection.

        Returns ``(output, output_bias)``: the bias comes back separately
        because both linears are built with ``skip_bias_add=True``.
        """
        # [s, b, 4hp]
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
        intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output, output_bias
class CoreAttention(MegatronModule):
    """Scaled dot-product attention over already-projected q/k/v tensors.

    Consumes query/key/value shaped [s, b, np, hn] (np = heads per
    partition, hn = hidden per head) and returns the context layer shaped
    [sq, b, hp] where hp is the per-partition hidden size.
    """
    def __init__(self, layer_number, attn_mask_type=AttnMaskType.padding):
        super().__init__()
        args = get_args()
        self.fp16 = args.fp16
        self.bf16 = args.bf16
        self.apply_query_key_layer_scaling = args.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = args.attention_softmax_in_fp32
        if self.apply_query_key_layer_scaling:
            # Layer scaling implies fp32 softmax for numerical stability.
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)
        self.attn_mask_type = attn_mask_type
        self.sequence_parallel = args.sequence_parallel
        projection_size = args.kv_channels * args.num_attention_heads
        # Per attention head and per partition values.
        world_size = parallel_state.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = apex.transformer.utils.divide(
            projection_size, world_size
        )
        self.hidden_size_per_attention_head = apex.transformer.utils.divide(
            projection_size, args.num_attention_heads
        )
        self.num_attention_heads_per_partition = apex.transformer.utils.divide(
            args.num_attention_heads, world_size
        )
        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            # Extra per-layer down-scaling of the scores; the fused softmax is
            # told about ``coeff`` so it can compensate.
            coeff = self.layer_number
            self.norm_factor *= coeff
        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            self.fp16,
            self.bf16,
            self.attn_mask_type,
            args.masked_softmax_fusion,
            attention_mask_func,
            self.attention_softmax_in_fp32,
            coeff,
        )
        # Dropout. Note that for a single iteration, this layer will generate
        # different outputs on different number of parallel partitions but
        # on average it should not be partition dependent.
        self.attention_dropout = torch.nn.Dropout(args.attention_dropout)

    def forward(self, query_layer, key_layer, value_layer, attention_mask):
        """Compute softmax(q @ k^T / norm_factor, mask) @ v.

        Shapes: query [sq, b, np, hn]; key/value [sk, b, np, hn];
        attention_mask as expected by the fused scale-mask-softmax.
        Returns the context layer [sq, b, hp].
        """
        # ===================================
        # Raw attention scores. [b, np, s, s]
        # ===================================
        # [b, np, sq, sk]
        output_size = (
            query_layer.size(1),
            query_layer.size(2),
            query_layer.size(0),
            key_layer.size(0),
        )
        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(
            output_size[2], output_size[0] * output_size[1], -1
        )
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
        # preallocting input tensor: [b * np, sq, sk]
        matmul_input_buffer = torch.empty(
            output_size[0] * output_size[1],
            output_size[2],
            output_size[3],
            dtype=query_layer.dtype,
            device=torch.cuda.current_device(),
        )
        # Raw attention scores. [b * np, sq, sk]
        # beta=0.0 means the uninitialized buffer never contributes to the result.
        matmul_result = torch.baddbmm(
            matmul_input_buffer,
            query_layer.transpose(0, 1),  # [b * np, sq, hn]
            key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
            beta=0.0,
            alpha=(1.0 / self.norm_factor),
        )
        # change view to [b, np, sq, sk]
        attention_scores = matmul_result.view(*output_size)
        # ===========================
        # Attention probs and dropout
        # ===========================
        # attention scores and attention mask [b, np, sq, sk]
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if not self.sequence_parallel:
            # Fork the tensor-parallel RNG tracker so dropout masks are
            # reproducible per partition.
            with tensor_parallel.get_cuda_rng_tracker().fork():
                attention_probs = self.attention_dropout(attention_probs)
        else:
            attention_probs = self.attention_dropout(attention_probs)
        # =========================
        # Context layer. [sq, b, hp]
        # =========================
        # value_layer -> context layer.
        # [sk, b, np, hn] --> [b, np, sq, hn]
        # context layer shape: [b, np, sq, hn]
        output_size = (
            value_layer.size(1),
            value_layer.size(2),
            query_layer.size(0),
            value_layer.size(3),
        )
        # change view [sk, b * np, hn]
        value_layer = value_layer.view(
            value_layer.size(0), output_size[0] * output_size[1], -1
        )
        # change view [b * np, sq, sk]
        attention_probs = attention_probs.view(
            output_size[0] * output_size[1], output_size[2], -1
        )
        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
        # change view [b, np, sq, hn]
        context_layer = context_layer.view(*output_size)
        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.hidden_size_per_partition,
        )
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class ParallelAttention(MegatronModule):
    """Parallel self-attention layer abstract class.

    Self-attention layer takes input with size [b, s, h]
    and returns output of the same size.

    Handles both self-attention (fused q/k/v projection) and cross-attention
    (separate query and fused key/value projections), optional selective
    activation recomputation, and a per-layer KV cache for inference.
    """
    def __init__(
        self,
        init_method,
        output_layer_init_method,
        layer_number,
        attention_type=AttnType.self_attn,
        attn_mask_type=AttnMaskType.padding,
    ):
        super().__init__()
        args = get_args()
        self.layer_number = max(1, layer_number)
        self.attention_type = attention_type
        self.attn_mask_type = attn_mask_type
        self.params_dtype = args.params_dtype
        projection_size = args.kv_channels * args.num_attention_heads
        # Per attention head and per partition values.
        world_size = parallel_state.get_tensor_model_parallel_world_size()
        self.hidden_size_per_attention_head = apex.transformer.utils.divide(
            projection_size, args.num_attention_heads
        )
        self.num_attention_heads_per_partition = apex.transformer.utils.divide(
            args.num_attention_heads, world_size
        )
        # Strided linear layer.
        if attention_type == AttnType.self_attn:
            # Fused q/k/v projection for self-attention.
            self.query_key_value = ColumnParallelLinear(
                args.hidden_size,
                3 * projection_size,
                gather_output=False,
                init_method=init_method,
                no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
                sequence_parallel_enabled=args.sequence_parallel,
            )
        else:
            assert attention_type == AttnType.cross_attn
            # Cross-attention: queries from decoder states, fused k/v from
            # the encoder output.
            self.query = ColumnParallelLinear(
                args.hidden_size,
                projection_size,
                gather_output=False,
                init_method=init_method,
                no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
                sequence_parallel_enabled=args.sequence_parallel,
            )
            self.key_value = ColumnParallelLinear(
                args.hidden_size,
                2 * projection_size,
                gather_output=False,
                init_method=init_method,
                no_async_tensor_model_parallel_allreduce=not args.async_tensor_model_parallel_allreduce,
                sequence_parallel_enabled=args.sequence_parallel,
            )
        self.core_attention = CoreAttention(self.layer_number, self.attn_mask_type)
        # "selective" granularity recomputes only the core attention.
        self.checkpoint_core_attention = args.recompute_granularity == "selective"
        # Output.
        self.dense = RowParallelLinear(
            projection_size,
            args.hidden_size,
            input_is_parallel=True,
            init_method=output_layer_init_method,
            skip_bias_add=True,
            sequence_parallel_enabled=args.sequence_parallel,
        )

    def _checkpointed_attention_forward(
        self, query_layer, key_layer, value_layer, attention_mask
    ):
        """Forward method with activation checkpointing."""
        def custom_forward(*inputs):
            query_layer = inputs[0]
            key_layer = inputs[1]
            value_layer = inputs[2]
            attention_mask = inputs[3]
            output_ = self.core_attention(
                query_layer, key_layer, value_layer, attention_mask
            )
            return output_
        hidden_states = tensor_parallel.checkpoint(
            custom_forward, False, query_layer, key_layer, value_layer, attention_mask
        )
        return hidden_states

    def _allocate_memory(self, inference_max_sequence_len, batch_size):
        """Allocate an empty [max_seq, batch, np, hn] KV-cache buffer."""
        return torch.empty(
            inference_max_sequence_len,
            batch_size,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
            dtype=self.params_dtype,
            device=torch.cuda.current_device(),
        )

    def forward(
        self, hidden_states, attention_mask, encoder_output=None, inference_params=None
    ):
        """Project q/k/v, run (optionally checkpointed) core attention,
        and apply the output projection.

        Returns ``(output, bias)`` — the dense layer is built with
        ``skip_bias_add=True`` so the bias comes back separately.
        """
        # hidden_states: [sq, b, h]
        # =================================================
        # Pre-allocate memory for key-values for inference.
        # =================================================
        if inference_params:
            if self.layer_number not in inference_params.key_value_memory_dict:
                # First call for this layer: create the KV cache.
                inf_max_seq_len = inference_params.max_sequence_len
                inf_max_batch_size = inference_params.max_batch_size
                inference_key_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size
                )
                inference_value_memory = self._allocate_memory(
                    inf_max_seq_len, inf_max_batch_size
                )
                inference_params.key_value_memory_dict[self.layer_number] = (
                    inference_key_memory,
                    inference_value_memory,
                )
            else:
                (
                    inference_key_memory,
                    inference_value_memory,
                ) = inference_params.key_value_memory_dict[self.layer_number]
        # =====================
        # Query, Key, and Value
        # =====================
        if self.attention_type == AttnType.self_attn:
            # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
            mixed_x_layer, _ = self.query_key_value(hidden_states)
            # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
            new_tensor_shape = mixed_x_layer.size()[:-1] + (
                self.num_attention_heads_per_partition,
                3 * self.hidden_size_per_attention_head,
            )
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
            (
                query_layer,
                key_layer,
                value_layer,
            ) = tensor_parallel.utils.split_tensor_along_last_dim(mixed_x_layer, 3)
        else:
            # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
            mixed_kv_layer, _ = self.key_value(encoder_output)
            # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
            new_tensor_shape = mixed_kv_layer.size()[:-1] + (
                self.num_attention_heads_per_partition,
                2 * self.hidden_size_per_attention_head,
            )
            mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
            # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
            (
                key_layer,
                value_layer,
            ) = tensor_parallel.utils.split_tensor_along_last_dim(mixed_kv_layer, 2)
            # Attention head [sq, b, h] --> [sq, b, hp]
            query_layer, _ = self.query(hidden_states)
            # [sq, b, hp] --> [sq, b, np, hn]
            new_tensor_shape = query_layer.size()[:-1] + (
                self.num_attention_heads_per_partition,
                self.hidden_size_per_attention_head,
            )
            query_layer = query_layer.view(*new_tensor_shape)
        # ==================================
        # Adjust key and value for inference
        # ==================================
        if inference_params:
            batch_start = inference_params.batch_size_offset
            batch_end = batch_start + key_layer.size(1)
            assert batch_end <= inference_key_memory.size(1)
            sequence_start = inference_params.sequence_len_offset
            sequence_end = sequence_start + key_layer.size(0)
            assert sequence_end <= inference_key_memory.size(0)
            # Copy key and values.
            inference_key_memory[
                sequence_start:sequence_end, batch_start:batch_end, ...
            ] = key_layer
            inference_value_memory[
                sequence_start:sequence_end, batch_start:batch_end, ...
            ] = value_layer
            # Attend over everything cached so far (prefix + new tokens).
            key_layer = inference_key_memory[:sequence_end, batch_start:batch_end, ...]
            value_layer = inference_value_memory[
                :sequence_end, batch_start:batch_end, ...
            ]
        # ==================================
        # core attention computation
        # ==================================
        if self.checkpoint_core_attention:
            context_layer = self._checkpointed_attention_forward(
                query_layer, key_layer, value_layer, attention_mask
            )
        else:
            context_layer = self.core_attention(
                query_layer, key_layer, value_layer, attention_mask
            )
        # =================
        # Output. [sq, b, h]
        # =================
        output, bias = self.dense(context_layer)
        return output, bias
def bias_dropout_add(x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
    """Add *bias* to *x*, apply dropout, then add the *residual* connection.

    Out-of-place reference implementation of the fused bias-dropout-add
    pattern; with ``training=False`` dropout is the identity.
    """
    dropped = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    return residual + dropped


def get_bias_dropout_add(training):
    """Return a 4-arg closure over :func:`bias_dropout_add` with *training* bound."""
    return lambda x, bias, residual, prob: bias_dropout_add(
        x, bias, residual, prob, training
    )
class ParallelTransformerLayer(MegatronModule):
    """A single transformer layer.
    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.

    Pre-LayerNorm layout: LN -> self-attention -> bias+dropout+residual,
    then (for decoder layers) LN -> cross-attention -> bias+dropout+residual,
    then LN -> MLP -> bias+dropout+residual.
    """
    def __init__(
        self,
        init_method,
        output_layer_init_method,
        layer_number,
        layer_type=LayerType.encoder,
        self_attn_mask_type=AttnMaskType.padding,
        drop_path_rate=0.0,
    ):
        # init_method: initializer for input projections;
        # output_layer_init_method: initializer for output projections
        # (conventionally scaled by depth).
        args = get_args()
        super().__init__()
        self.layer_number = layer_number
        self.layer_type = layer_type
        self.apply_residual_connection_post_layernorm = (
            args.apply_residual_connection_post_layernorm
        )
        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection
        # Layernorm on the input data.
        self.input_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon,
            # no_persist_layer_norm=args.no_persist_layer_norm,
            sequence_parallel_enabled=args.sequence_parallel,
        )
        # Self attention.
        self.self_attention = ParallelAttention(
            init_method,
            output_layer_init_method,
            layer_number,
            attention_type=AttnType.self_attn,
            attn_mask_type=self_attn_mask_type,
        )
        self.hidden_dropout = args.hidden_dropout
        self.bias_dropout_fusion = args.bias_dropout_fusion
        # note(mkozuki): DropPath (stochastic depth) is intentionally disabled
        # in this copy; the assert below rejects any positive rate.
        # self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None
        assert drop_path_rate <= 0.0
        self.drop_path = None
        # Layernorm on the attention output
        self.post_attention_layernorm = LayerNorm(
            args.hidden_size,
            eps=args.layernorm_epsilon,
            # no_persist_layer_norm=args.no_persist_layer_norm,
            sequence_parallel_enabled=args.sequence_parallel,
        )
        if self.layer_type == LayerType.decoder:
            # Decoder layers additionally attend over the encoder output.
            self.inter_attention = ParallelAttention(
                init_method,
                output_layer_init_method,
                layer_number,
                attention_type=AttnType.cross_attn,
            )
            # Layernorm on the attention output.
            self.post_inter_attention_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon,
                # no_persist_layer_norm=args.no_persist_layer_norm,
                sequence_parallel_enabled=args.sequence_parallel,
            )
        # MLP
        # note(mkozuki): mixture-of-experts (SwitchMLP) is not supported here.
        assert args.num_experts is None
        # if args.num_experts is not None:
        #     self.mlp = SwitchMLP(init_method, output_layer_init_method)
        # else:
        #     self.mlp = ParallelMLP(init_method, output_layer_init_method)
        self.mlp = ParallelMLP(init_method, output_layer_init_method)
        # Set bias+dropout+add fusion grad_enable execution handler.
        TORCH_MAJOR = int(torch.__version__.split(".")[0])
        TORCH_MINOR = int(torch.__version__.split(".")[1])
        # torch >= 1.10 (nvfuser) does not need the enable_grad workaround.
        use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
        self.bias_dropout_add_exec_handler = (
            contextlib.nullcontext if use_nvfuser else torch.enable_grad
        )
    def forward(
        self,
        hidden_states,
        attention_mask,
        encoder_output=None,
        enc_dec_attn_mask=None,
        inference_params=None,
    ):
        # hidden_states: [s, b, h]
        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output, attention_bias = self.self_attention(
            layernorm_output, attention_mask, inference_params=inference_params
        )
        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states
        if self.drop_path is None:
            # Fused-style bias + dropout + residual add.
            bias_dropout_add_func = get_bias_dropout_add(self.training)
            with self.bias_dropout_add_exec_handler():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout,
                )
        else:
            # NOTE(review): unreachable in this copy -- __init__ always sets
            # self.drop_path = None; kept for parity with upstream Megatron-LM.
            out = torch.nn.functional.dropout(
                attention_output + attention_bias,
                p=self.hidden_dropout,
                training=self.training,
            )
            layernorm_input = residual + self.drop_path(out)
        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)
        if self.layer_type == LayerType.decoder:
            # Cross attention over the encoder output.
            attention_output, attention_bias = self.inter_attention(
                layernorm_output, enc_dec_attn_mask, encoder_output=encoder_output
            )
            # residual connection
            if self.apply_residual_connection_post_layernorm:
                residual = layernorm_output
            else:
                residual = layernorm_input
            # NOTE(review): relies on bias_dropout_add_func bound in the
            # `self.drop_path is None` branch above (always taken here, since
            # drop_path is always None in this copy).
            with self.bias_dropout_add_exec_handler():
                layernorm_input = bias_dropout_add_func(
                    attention_output,
                    attention_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout,
                )
            # Layer norm post the decoder attention
            layernorm_output = self.post_inter_attention_layernorm(layernorm_input)
        # MLP.
        mlp_output, mlp_bias = self.mlp(layernorm_output)
        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input
        if self.drop_path is None:
            with self.bias_dropout_add_exec_handler():
                output = bias_dropout_add_func(
                    mlp_output,
                    mlp_bias.expand_as(residual),
                    residual,
                    self.hidden_dropout,
                )
        else:
            # NOTE(review): unreachable in this copy (drop_path is always None).
            out = torch.nn.functional.dropout(
                mlp_output + mlp_bias, p=self.hidden_dropout, training=self.training
            )
            output = residual + self.drop_path(out)
        return output
class ParallelTransformer(MegatronModule):
    """Transformer class.

    A stack of ``ParallelTransformerLayer``s operating on [s, b, h] tensors,
    with optional activation recomputation and pipeline-parallel layer
    assignment.
    """
    def __init__(
        self,
        init_method,
        output_layer_init_method,
        layer_type=LayerType.encoder,
        self_attn_mask_type=AttnMaskType.padding,
        post_layer_norm=True,
        pre_process=True,
        post_process=True,
        drop_path_rate=0.0,
    ):
        super().__init__()
        args = get_args()
        self.layer_type = layer_type
        self.model_type = args.model_type
        self.bf16 = args.bf16
        self.fp32_residual_connection = args.fp32_residual_connection
        self.post_layer_norm = post_layer_norm
        # pre_process/post_process mark whether this pipeline stage holds the
        # first/last portion of the model.
        self.pre_process = pre_process
        self.post_process = post_process
        self.input_tensor = None
        self.drop_path_rate = drop_path_rate
        # Store activation checkpointing flags.
        self.recompute_granularity = args.recompute_granularity
        self.recompute_method = args.recompute_method
        self.recompute_num_layers = args.recompute_num_layers
        self.distribute_saved_activations = (
            args.distribute_saved_activations and not args.sequence_parallel
        )
        self.sequence_parallel = args.sequence_parallel
        # Number of layers resident on this (pipeline) rank.
        self.num_layers = get_num_layers(
            args, args.model_type == ModelType.encoder_and_decoder
        )
        # Per-layer stochastic-depth rates, linear in depth; all zero here
        # since ParallelTransformerLayer asserts drop_path_rate <= 0.
        self.drop_path_rates = [
            rate.item()
            for rate in torch.linspace(0, self.drop_path_rate, args.num_layers)
        ]
        # Transformer layers.
        def build_layer(layer_number):
            # layer_number is 1-based (global across pipeline stages).
            return ParallelTransformerLayer(
                init_method,
                output_layer_init_method,
                layer_number,
                layer_type=layer_type,
                self_attn_mask_type=self_attn_mask_type,
                drop_path_rate=self.drop_path_rates[layer_number - 1],
            )
        if args.virtual_pipeline_model_parallel_size is not None:
            assert args.num_layers % args.virtual_pipeline_model_parallel_size == 0, (
                "num_layers_per_stage must be divisible by "
                "virtual_pipeline_model_parallel_size"
            )
            assert args.model_type != ModelType.encoder_and_decoder
            # Number of layers in each model chunk is the number of layers in the stage,
            # divided by the number of model chunks in a stage.
            self.num_layers = (
                self.num_layers // args.virtual_pipeline_model_parallel_size
            )
            # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0] [2] [4] [6]
            # Stage 1: [1] [3] [5] [7]
            # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
            # layers to stages like (each list is a model chunk):
            # Stage 0: [0, 1] [4, 5]
            # Stage 1: [2, 3] [6, 7]
            offset = parallel_state.get_virtual_pipeline_model_parallel_rank() * (
                args.num_layers // args.virtual_pipeline_model_parallel_size
            ) + (parallel_state.get_pipeline_model_parallel_rank() * self.num_layers)
        else:
            # Each stage gets a contiguous set of layers.
            if (
                args.model_type == ModelType.encoder_and_decoder
                and parallel_state.get_pipeline_model_parallel_world_size() > 1
            ):
                pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()
                if layer_type == LayerType.encoder:
                    offset = pipeline_rank * self.num_layers
                else:
                    # Decoder ranks start counting after the encoder's split rank.
                    num_ranks_in_enc = args.pipeline_model_parallel_split_rank
                    offset = (pipeline_rank - num_ranks_in_enc) * self.num_layers
            else:
                offset = (
                    parallel_state.get_pipeline_model_parallel_rank() * self.num_layers
                )
        if self.num_layers == 0:
            # When a standalone embedding stage is used (e.g.,
            # args.standalone_embedding_stage == True), virtual pipeline ranks
            # on pipeline rank 0 will have zero transformer layers assigned to
            # them. This results in the model's input and output tensors to be
            # the same, which will cause failure for certain output tensor
            # optimizations (e.g., pipeline output deallocation). To remedy
            # this, we assign a 'no-op' layer on these ranks, which will
            # disconnect the input tensor from the output tensor.
            self.num_layers = 1
            self.layers = torch.nn.ModuleList([NoopTransformerLayer(1)])
        else:
            self.layers = torch.nn.ModuleList(
                [build_layer(i + 1 + offset) for i in range(self.num_layers)]
            )
        if self.post_process and self.post_layer_norm:
            # Final layer norm before output.
            self.final_layernorm = LayerNorm(
                args.hidden_size,
                eps=args.layernorm_epsilon,
                # no_persist_layer_norm=args.no_persist_layer_norm,
                sequence_parallel_enabled=args.sequence_parallel,
            )
    def _get_layer(self, layer_number):
        # layer_number is the 0-based index into this rank's local layer list.
        return self.layers[layer_number]
    def _checkpointed_forward(
        self, hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
    ):
        """Forward method with activation checkpointing."""
        def custom(start, end):
            # Build a callable that runs layers [start, end), so the
            # checkpoint machinery can re-execute it during backward.
            def custom_forward(*inputs):
                x_ = inputs[0]
                attention_mask = inputs[1]
                encoder_output = inputs[2]
                enc_dec_attn_mask = inputs[3]
                for index in range(start, end):
                    layer = self._get_layer(index)
                    x_ = layer(x_, attention_mask, encoder_output, enc_dec_attn_mask)
                return x_
            return custom_forward
        if self.recompute_method == "uniform":
            # Uniformly divide the total number of Transformer layers and checkpoint
            # the input activation of each divided chunk.
            # A method to further reduce memory usage reducing checkpoints.
            l = 0
            while l < self.num_layers:
                hidden_states = tensor_parallel.random.checkpoint(
                    custom(l, l + self.recompute_num_layers),
                    self.distribute_saved_activations,
                    hidden_states,
                    attention_mask,
                    encoder_output,
                    enc_dec_attn_mask,
                )
                l += self.recompute_num_layers
        elif self.recompute_method == "block":
            # Checkpoint the input activation of only a set number of individual
            # Transformer layers and skip the rest.
            # A method fully use the device memory removing redundant re-computation.
            for l in range(self.num_layers):
                if l < self.recompute_num_layers:
                    hidden_states = tensor_parallel.random.checkpoint(
                        custom(l, l + 1),
                        self.distribute_saved_activations,
                        hidden_states,
                        attention_mask,
                        encoder_output,
                        enc_dec_attn_mask,
                    )
                else:
                    hidden_states = custom(l, l + 1)(
                        hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
                    )
        else:
            raise ValueError("Invalid activation recompute method.")
        return hidden_states
    def set_input_tensor(self, input_tensor):
        """Set input tensor to be used instead of forward()'s input.
        When doing pipeline parallelism the input from the previous
        stage comes from communication, not from the input, so the
        model's forward_step_func won't have it. This function is thus
        used by internal code to bypass the input provided by the
        forward_step_func"""
        self.input_tensor = input_tensor
    def forward(
        self,
        hidden_states,
        attention_mask,
        encoder_output=None,
        enc_dec_attn_mask=None,
        inference_params=None,
    ):
        # hidden_states: [s, b, h]
        # Checks.
        if inference_params:
            assert (
                self.recompute_granularity is None
            ), "inference does not work with activation checkpointing"
        if not self.pre_process:
            # See set_input_tensor()
            hidden_states = self.input_tensor
        # Viewless tensor.
        # - We only need to create a viewless tensor in the case of micro batch
        #   size (mbs) == 1, since in this case, 'hidden_states.transpose()'
        #   above creates a view tensor, and '.contiguous()' is a pass-through.
        #   For mbs >= 2, '.contiguous()' creates a new tensor, eliminating
        #   the need to make it viewless.
        #
        #   However, we don't explicitly check mbs == 1 here because
        #   make_viewless_tensor() has negligible overhead when its input
        #   is already viewless.
        #
        # - For the 'else' case above, calling make_viewless_tensor() here is
        #   likely redundant, since p2p_communication.py (likely originator)
        #   already creates viewless tensors. That said, make_viewless_tensor()
        #   is called here to be future-proof and corner-case-proof.
        # hidden_states = mpu.make_viewless_tensor(hidden_states, requires_grad=True, keep_graph=True)
        if self.sequence_parallel:
            # Fork the CUDA RNG so dropout stays consistent across tensor ranks.
            rng_context = tensor_parallel.get_cuda_rng_tracker().fork()
        else:
            rng_context = contextlib.nullcontext()
        with rng_context:
            # Forward pass.
            if self.recompute_granularity == "full":
                hidden_states = self._checkpointed_forward(
                    hidden_states, attention_mask, encoder_output, enc_dec_attn_mask
                )
            else:
                for index in range(self.num_layers):
                    layer = self._get_layer(index)
                    hidden_states = layer(
                        hidden_states,
                        attention_mask,
                        encoder_output=encoder_output,
                        enc_dec_attn_mask=enc_dec_attn_mask,
                        inference_params=inference_params,
                    )
        # Final layer norm.
        if self.post_process and self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)
        return hidden_states
def get_num_layers(args, is_encoder_and_decoder_model):
    """Compute the number of transformer layers resident on the current rank.

    Takes into account pipeline parallelism, the encoder/decoder pipeline
    split, and the optional standalone embedding stage (which reserves
    pipeline rank 0 for the embedding and gives it zero transformer layers).
    """
    if parallel_state.get_pipeline_model_parallel_world_size() > 1:
        if is_encoder_and_decoder_model:
            assert args.pipeline_model_parallel_split_rank is not None
            # When a standalone embedding stage is used, a rank is taken from
            # the encoder's ranks, to be used for the encoder's embedding
            # layer. This way, the rank referenced by the 'split rank' remains
            # the same whether or not a standalone embedding stage is used.
            num_ranks_in_encoder = (
                args.pipeline_model_parallel_split_rank - 1
                if args.standalone_embedding_stage
                else args.pipeline_model_parallel_split_rank
            )
            num_ranks_in_decoder = (
                args.transformer_pipeline_model_parallel_size - num_ranks_in_encoder
            )
            assert args.num_layers % num_ranks_in_encoder == 0, (
                "num_layers (%d) must be divisible by number of ranks given to encoder (%d)"
                % (
                    args.num_layers,
                    num_ranks_in_encoder,
                )
            )
            assert args.num_layers % num_ranks_in_decoder == 0, (
                "num_layers (%d) must be divisible by number of ranks given to decoder (%d)"
                % (
                    args.num_layers,
                    num_ranks_in_decoder,
                )
            )
            if parallel_state.is_pipeline_stage_before_split():
                # Encoder side: a standalone-embedding rank 0 holds no layers.
                num_layers = (
                    0
                    if args.standalone_embedding_stage
                    and parallel_state.get_pipeline_model_parallel_rank() == 0
                    else args.num_layers // num_ranks_in_encoder
                )
            else:
                # Decoder side: layers divided evenly among decoder ranks.
                num_layers = args.num_layers // num_ranks_in_decoder
        else:
            assert (
                args.num_layers % args.transformer_pipeline_model_parallel_size == 0
            ), "num_layers must be divisible by transformer_pipeline_model_parallel_size"
            # When a standalone embedding stage is used, all transformer layers
            # are divided among pipeline rank >= 1, while on pipeline rank 0,
            # ranks either contain the input embedding layer (virtual pp rank 0),
            # or no layers at all (virtual pp rank >= 1).
            num_layers = (
                0
                if args.standalone_embedding_stage
                and parallel_state.get_pipeline_model_parallel_rank() == 0
                else args.num_layers // args.transformer_pipeline_model_parallel_size
            )
    else:
        # No pipeline parallelism: every layer lives on this rank.
        num_layers = args.num_layers
    return num_layers
class NoopTransformerLayer(MegatronModule):
    """A pass-through stand-in for a transformer layer.

    Used when a standalone embedding stage (args.standalone_embedding_stage
    == True) leaves a (virtual) pipeline rank with zero transformer layers:
    the model's input and output would otherwise be the very same tensor,
    which breaks output-tensor optimizations such as pipeline output
    deallocation. Returning a clone disconnects input from output; ranks
    holding a no-op layer are under-utilized anyway, so the extra copy is
    harmless.
    """
    def __init__(self, layer_number):
        super().__init__()
        self.layer_number = layer_number
    def forward(
        self,
        hidden_states,
        attention_mask,
        encoder_output=None,
        enc_dec_attn_mask=None,
        inference_params=None,
    ):
        # Clone so downstream code never aliases the stage's input tensor.
        return hidden_states.clone()
def parallel_lm_logits(input_, word_embeddings_weight, parallel_output, bias=None):
    """LM logits using word embedding weights.

    Projects ``input_`` onto the (tensor-parallel) word embedding matrix.
    If ``parallel_output`` is True the vocab-parallel shard is returned;
    otherwise the logits are gathered across tensor-parallel ranks.
    """
    args = get_args()
    # When async all-reduce or sequence parallelism is on, the input is
    # consumed as-is; otherwise it is first copied to the TP region.
    skip_input_copy = (
        args.async_tensor_model_parallel_allreduce or args.sequence_parallel
    )
    if skip_input_copy:
        input_parallel = input_
        is_model_parallel = (
            parallel_state.get_tensor_model_parallel_world_size() > 1
        )
        async_grad_allreduce = (
            args.async_tensor_model_parallel_allreduce
            and is_model_parallel
            and not args.sequence_parallel
        )
    else:
        async_grad_allreduce = False
        input_parallel = tensor_parallel.copy_to_tensor_model_parallel_region(input_)
    # Matrix multiply (functional form of
    # LinearWithGradAccumulationAndAsyncCommunication).
    linear_fn = (
        tensor_parallel.layers.linear_with_grad_accumulation_and_async_allreduce
    )
    logits_parallel = linear_fn(
        input_parallel,
        word_embeddings_weight,
        bias,
        args.gradient_accumulation_fusion,
        async_grad_allreduce,
        args.sequence_parallel,
    )
    # Gather if needed.
    if not parallel_output:
        return tensor_parallel.gather_from_tensor_model_parallel_region(
            logits_parallel
        )
    return logits_parallel
def get_language_model(
    num_tokentypes,
    add_pooler,
    encoder_attn_mask_type,
    init_method=None,
    scaled_init_method=None,
    add_encoder=True,
    add_decoder=False,
    decoder_attn_mask_type=AttnMaskType.causal,
    pre_process=True,
    post_process=True,
):
    """Build a TransformerLanguageModel and the key used to checkpoint it.

    When ``init_method`` / ``scaled_init_method`` are omitted, normal
    initializers derived from ``args.init_method_std`` are substituted.
    """
    args = get_args()
    # Fill in default initializers from the global args.
    init_method = (
        init_method
        if init_method is not None
        else init_method_normal(args.init_method_std)
    )
    scaled_init_method = (
        scaled_init_method
        if scaled_init_method is not None
        else scaled_init_method_normal(args.init_method_std, args.num_layers)
    )
    # Language model.
    language_model = TransformerLanguageModel(
        init_method,
        scaled_init_method,
        encoder_attn_mask_type,
        num_tokentypes=num_tokentypes,
        add_encoder=add_encoder,
        add_decoder=add_decoder,
        decoder_attn_mask_type=decoder_attn_mask_type,
        add_pooler=add_pooler,
        pre_process=pre_process,
        post_process=post_process,
    )
    # Key used for checkpoints.
    return language_model, "language_model"
class Pooler(MegatronModule):
    """Pooler layer.

    Selects the hidden state of one token (e.g. the start-of-sequence token)
    and applies a linear transformation followed by tanh.

    Arguments:
        hidden_size: hidden size
        init_method: weight initialization method for the linear layer.
            bias is set to zero.
    """
    def __init__(self, hidden_size, init_method):
        super().__init__()
        args = get_args()
        self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
        self.sequence_parallel = args.sequence_parallel
    def forward(self, hidden_states, sequence_index=0):
        # hidden_states: [s, b, h]; sequence_index: token position to pool.
        # With sequence parallelism, gather the full sequence first so the
        # same pooler output is produced on every tensor-parallel rank.
        if self.sequence_parallel:
            hidden_states = (
                tensor_parallel.mappings.gather_from_sequence_parallel_region(
                    hidden_states
                )
            )
        selected = hidden_states[sequence_index, :, :]
        return torch.tanh(self.dense(selected))
class Embedding(MegatronModule):
    """Language model embeddings.

    Sums word embeddings and learned position embeddings, optionally adds
    token-type embeddings, then transposes [b, s, h] -> [s, b, h] and
    applies dropout.

    Arguments:
        hidden_size: hidden size
        vocab_size: vocabulary size
        max_sequence_length: maximum size of sequence. This
                             is used for positional embedding
        embedding_dropout_prob: dropout probability for embeddings
        init_method: weight initialization method
        num_tokentypes: size of the token-type embeddings. 0 value
                        will ignore this embedding
    """
    def __init__(
        self,
        hidden_size,
        vocab_size,
        max_sequence_length,
        embedding_dropout_prob,
        init_method,
        num_tokentypes=0,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.init_method = init_method
        self.num_tokentypes = num_tokentypes
        args = get_args()
        # Word embeddings (parallel).
        self.word_embeddings = VocabParallelEmbedding(
            vocab_size, self.hidden_size, init_method=self.init_method
        )
        self._word_embeddings_key = "word_embeddings"
        # Position embedding (serial).
        self.position_embeddings = torch.nn.Embedding(
            max_sequence_length, self.hidden_size
        )
        self._position_embeddings_key = "position_embeddings"
        # Initialize the position embeddings.
        self.init_method(self.position_embeddings.weight)
        # Token type embedding.
        # Add this as an optional field that can be added through
        # method call so we can load a pretrain model without
        # token types and add them as needed.
        self._tokentype_embeddings_key = "tokentype_embeddings"
        if self.num_tokentypes > 0:
            self.tokentype_embeddings = torch.nn.Embedding(
                self.num_tokentypes, self.hidden_size
            )
            # Initialize the token-type embeddings.
            self.init_method(self.tokentype_embeddings.weight)
        else:
            self.tokentype_embeddings = None
        self.fp32_residual_connection = args.fp32_residual_connection
        self.sequence_parallel = args.sequence_parallel
        # Embeddings dropout
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
    def zero_parameters(self):
        """Zero out all parameters in embedding."""
        self.word_embeddings.weight.data.fill_(0)
        self.word_embeddings.weight.shared = True
        self.position_embeddings.weight.data.fill_(0)
        self.position_embeddings.weight.shared = True
        if self.num_tokentypes > 0:
            # Fix: operate on .data like the two embeddings above. A bare
            # in-place fill_ on a leaf parameter that requires grad raises a
            # RuntimeError under autograd.
            self.tokentype_embeddings.weight.data.fill_(0)
            self.tokentype_embeddings.weight.shared = True
    def add_tokentype_embeddings(self, num_tokentypes):
        """Add token-type embedding. This function is provided so we can add
        token-type embeddings in case the pretrained model does not have it.
        This allows us to load the model normally and then add this embedding.
        """
        if self.tokentype_embeddings is not None:
            raise Exception("tokentype embeddings is already initialized")
        if torch.distributed.get_rank() == 0:
            print(
                "adding embedding for {} tokentypes".format(num_tokentypes), flush=True
            )
        self.num_tokentypes = num_tokentypes
        self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
        # Initialize the token-type embeddings.
        self.init_method(self.tokentype_embeddings.weight)
    def forward(self, input_ids, position_ids, tokentype_ids=None):
        """Return dropped-out embeddings of shape [s, b, h]."""
        # Embeddings.
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = words_embeddings + position_embeddings
        if tokentype_ids is not None:
            assert self.tokentype_embeddings is not None
            embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
        else:
            assert self.tokentype_embeddings is None
        # Data format change to avoid explicit tranposes : [b s h] --> [s b h].
        embeddings = embeddings.transpose(0, 1).contiguous()
        # If the input flag for fp32 residual connection is set, convert for float.
        if self.fp32_residual_connection:
            embeddings = embeddings.float()
        # Dropout.
        if self.sequence_parallel:
            # Scatter along the sequence dim, then apply dropout under the
            # forked RNG so it matches across tensor-parallel ranks.
            embeddings = scatter_to_sequence_parallel_region(embeddings)
            with tensor_parallel.get_cuda_rng_tracker().fork():
                embeddings = self.embedding_dropout(embeddings)
        else:
            embeddings = self.embedding_dropout(embeddings)
        return embeddings
class TransformerLanguageModel(MegatronModule):
    """Transformer language model.

    Embedding + encoder (and optionally decoder and pooler), partitioned
    across pipeline stages via pre_process/post_process.

    Arguments:
        transformer_hparams: transformer hyperparameters
        vocab_size: vocabulary size
        max_sequence_length: maximum size of sequence. This
                             is used for positional embedding
        embedding_dropout_prob: dropout probability for embeddings
        num_tokentypes: size of the token-type embeddings. 0 value
                        will ignore this embedding
    """
    def __init__(
        self,
        init_method,
        output_layer_init_method,
        encoder_attn_mask_type,
        num_tokentypes=0,
        add_encoder=True,
        add_decoder=False,
        decoder_attn_mask_type=AttnMaskType.causal,
        add_pooler=False,
        pre_process=True,
        post_process=True,
    ):
        super().__init__()
        args = get_args()
        self.pre_process = pre_process
        self.post_process = post_process
        self.hidden_size = args.hidden_size
        self.num_tokentypes = num_tokentypes
        self.init_method = init_method
        self.add_encoder = add_encoder
        self.encoder_attn_mask_type = encoder_attn_mask_type
        self.add_decoder = add_decoder
        self.decoder_attn_mask_type = decoder_attn_mask_type
        self.add_pooler = add_pooler
        # Filled in via set_input_tensor() on decoder-only stages.
        self.encoder_hidden_state = None
        # Embeddings.
        if self.pre_process:
            self.embedding = Embedding(
                self.hidden_size,
                args.padded_vocab_size,
                args.max_position_embeddings,
                args.hidden_dropout,
                self.init_method,
                self.num_tokentypes,
            )
            self._embedding_key = "embedding"
        # Transformer.
        # Encoder (usually set to True, False if part of an encoder-decoder
        # architecture and in encoder-only stage).
        if self.add_encoder:
            self.encoder = ParallelTransformer(
                self.init_method,
                output_layer_init_method,
                self_attn_mask_type=self.encoder_attn_mask_type,
                pre_process=self.pre_process,
                post_process=self.post_process,
            )
            self._encoder_key = "encoder"
        else:
            self.encoder = None
        # Decoder (usually set to False, True if part of an encoder-decoder
        # architecture and in decoder-only stage).
        if self.add_decoder:
            self.decoder = ParallelTransformer(
                self.init_method,
                output_layer_init_method,
                layer_type=LayerType.decoder,
                self_attn_mask_type=self.decoder_attn_mask_type,
                pre_process=self.pre_process,
                post_process=self.post_process,
            )
            self._decoder_key = "decoder"
        else:
            self.decoder = None
        if self.post_process:
            # Pooler.
            if self.add_pooler:
                self.pooler = Pooler(self.hidden_size, self.init_method)
                self._pooler_key = "pooler"
    def set_input_tensor(self, input_tensor):
        """See megatron.model.transformer.set_input_tensor()"""
        # This is usually handled in schedules.py but some inference code still
        # gives us non-lists or None
        if not isinstance(input_tensor, list):
            input_tensor = [input_tensor]
        if self.add_encoder and self.add_decoder:
            assert (
                len(input_tensor) == 1
            ), "input_tensor should only be length 1 for stage with both encoder and decoder"
            self.encoder.set_input_tensor(input_tensor[0])
        elif self.add_encoder:
            assert (
                len(input_tensor) == 1
            ), "input_tensor should only be length 1 for stage with only encoder"
            self.encoder.set_input_tensor(input_tensor[0])
        elif self.add_decoder:
            # Decoder-only stage: may receive [decoder_input, encoder_output]
            # or just [encoder_output].
            if len(input_tensor) == 2:
                self.decoder.set_input_tensor(input_tensor[0])
                self.encoder_hidden_state = input_tensor[1]
            elif len(input_tensor) == 1:
                self.decoder.set_input_tensor(None)
                self.encoder_hidden_state = input_tensor[0]
            else:
                raise Exception("input_tensor must have either length 1 or 2")
        else:
            raise Exception("Stage must have at least either encoder or decoder")
    def forward(
        self,
        enc_input_ids,
        enc_position_ids,
        enc_attn_mask,
        dec_input_ids=None,
        dec_position_ids=None,
        dec_attn_mask=None,
        enc_dec_attn_mask=None,
        tokentype_ids=None,
        inference_params=None,
        pooling_sequence_index=0,
        enc_hidden_states=None,
        output_enc_hidden=False,
    ):
        # NOTE(review): args is fetched but not used in this method.
        args = get_args()
        # Encoder embedding.
        if self.pre_process:
            encoder_input = self.embedding(
                enc_input_ids, enc_position_ids, tokentype_ids=tokentype_ids
            )
        else:
            encoder_input = None
        # Run encoder.
        if enc_hidden_states is None:
            if self.encoder is not None:
                encoder_output = self.encoder(
                    encoder_input, enc_attn_mask, inference_params=inference_params
                )
            else:
                encoder_output = self.encoder_hidden_state
        else:
            # NOTE(review): encoder_input is None when pre_process is False,
            # so this path presumably assumes enc_hidden_states is only
            # supplied on pre-process stages -- TODO confirm.
            encoder_output = enc_hidden_states.to(encoder_input.dtype)
        if self.post_process:
            if self.add_pooler:
                pooled_output = self.pooler(encoder_output, pooling_sequence_index)
        # output_enc_hidden refers to when we just need the encoder's
        # output. For example, it is helpful to compute
        # similarity between two sequences by average pooling
        if not self.add_decoder or output_enc_hidden:
            if self.add_pooler and self.post_process:
                return encoder_output, pooled_output
            else:
                return encoder_output
        # Decoder embedding.
        if self.pre_process:
            decoder_input = self.embedding(dec_input_ids, dec_position_ids)
        else:
            decoder_input = None
        # Run decoder.
        decoder_output = self.decoder(
            decoder_input,
            dec_attn_mask,
            encoder_output=encoder_output,
            enc_dec_attn_mask=enc_dec_attn_mask,
            inference_params=inference_params,
        )
        if self.add_pooler and self.post_process:
            return decoder_output, encoder_output, pooled_output
        else:
            return decoder_output, encoder_output
def post_language_model_processing(
    lm_output, labels, logit_weights, parallel_output, fp16_lm_cross_entropy
):
    """Project LM output to logits and, when labels are given, return the
    vocab-parallel cross-entropy loss instead of the logits."""
    logits = parallel_lm_logits(lm_output, logit_weights, parallel_output)
    if labels is None:
        return logits
    if fp16_lm_cross_entropy:
        # fp16 loss path requires half-precision logits.
        assert logits.dtype == torch.half
        return tensor_parallel.vocab_parallel_cross_entropy(logits, labels)
    return tensor_parallel.vocab_parallel_cross_entropy(logits.float(), labels)
def module_size(m: torch.nn.Module, only_trainable: bool = False):
    """
    Return the total number of parameters used by ``m``, counting parameters
    that share the same storage only once; if ``only_trainable`` is True,
    only parameters with ``requires_grad = True`` are included.
    """
    params = m.parameters()
    if only_trainable:
        params = (p for p in params if p.requires_grad)
    # Deduplicate by storage pointer so tied/shared weights count once.
    unique_by_ptr = {}
    for p in params:
        unique_by_ptr[p.data_ptr()] = p
    return sum(p.numel() for p in unique_by_ptr.values())
|
apex-master
|
apex/transformer/testing/standalone_transformer_lm.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron global variables."""
import os
import sys
import time
import torch
from apex.transformer.microbatches import build_num_microbatches_calculator
from .arguments import parse_args
# Process-wide singletons, populated once via set_global_variables() and the
# private _set_* / _build_* helpers below.
_GLOBAL_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
def get_args():
    """Return arguments."""
    # Fails fast with an informative error if set_global_variables() has not run.
    _ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
    return _GLOBAL_ARGS
def get_num_microbatches() -> int:
    """Return the current number of microbatches from the global calculator."""
    calculator = _GLOBAL_NUM_MICROBATCHES_CALCULATOR
    return calculator.get()
def get_current_global_batch_size() -> int:
    """Return the current global batch size from the global calculator."""
    calculator = _GLOBAL_NUM_MICROBATCHES_CALCULATOR
    return calculator.get_current_global_batch_size()
def update_num_microbatches(consumed_samples: int, *, consistency_check: bool = True) -> None:
    r"""Update the number of microbatches upon the number of consumed samples.
    .. note::
        This function has no effect unless ``rampup_batch_size`` is set.
    Args:
        consumed_samples: The number of consumed samples so far. Basically this is equal to
            :math:`num_iter * global_batch_size`.
        consistency_check: If :obj:`True`, sanity checks the consumed samples, i.e., check if
            ``consumed_samples`` is divisible by :math:`micro_batch_size \times data_parallel_size`.
    """
    # Raw docstring above keeps the ``\times`` in the :math: role literal
    # (in a plain string ``\t`` would render as a tab).
    _GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples, consistency_check)
# def get_tokenizer():
# """Return tokenizer."""
# _ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
# return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
    """Return tensorboard writer. It can be None so no need
    to check if it is initialized."""
    # None unless _set_tensorboard_writer() created a writer on this rank.
    return _GLOBAL_TENSORBOARD_WRITER
def get_adlr_autoresume():
    """ADLR autoresume object. It can be None so no need
    to check if it is initialized."""
    # None unless _set_adlr_autoresume() imported the AutoResume helper.
    return _GLOBAL_ADLR_AUTORESUME
def get_timers():
    """Return timers."""
    # Fails fast if _set_timers() has not been called yet.
    _ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
    return _GLOBAL_TIMERS
def set_global_variables(extra_args_provider=None, args_defaults=None, override_args=None,
                         ignore_unknown_args=False):
    """Set args, tokenizer, tensorboard-writer, adlr-autoresume, and timers.

    Args:
        extra_args_provider: optional callable adding extra CLI arguments.
        args_defaults: optional dict of default argument values.
        override_args: optional dict of forced argument values.
        ignore_unknown_args: whether unknown CLI arguments are tolerated.
    """
    # None-defaults instead of mutable ``{}`` defaults: a shared default dict
    # would be mutated across calls if parse_args ever writes into it.
    if args_defaults is None:
        args_defaults = {}
    if override_args is None:
        override_args = {}
    args = _parse_args(extra_args_provider=extra_args_provider,
                       defaults=args_defaults,
                       override_args=override_args,
                       ignore_unknown_args=ignore_unknown_args)
    # _build_num_microbatches_calculator(args)
    # if args.vocab_file:
    #     _ = _build_tokenizer(args)
    _set_tensorboard_writer(args)
    _set_adlr_autoresume(args)
    _set_timers()
def _parse_args(extra_args_provider=None, defaults=None, override_args=None,
                ignore_unknown_args=False):
    """Parse entire arguments and cache them in the module-global _GLOBAL_ARGS.

    Raises (via _ensure_var_is_not_initialized) if called twice.
    """
    global _GLOBAL_ARGS
    _ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args')
    # None-defaults instead of mutable ``{}`` defaults; fresh dicts per call.
    _GLOBAL_ARGS = parse_args(extra_args_provider=extra_args_provider,
                              defaults={} if defaults is None else defaults,
                              override_args=(
                                  {} if override_args is None else override_args),
                              ignore_unknown_args=ignore_unknown_args)
    return _GLOBAL_ARGS
def _build_num_microbatches_calculator(args):
    """Initialize the module-level microbatch calculator exactly once."""
    # The concrete calculator type is chosen by build_num_microbatches_calculator
    # from args (e.g. depending on rampup_batch_size).
    global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
    _ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR,
                                   'num microbatches calculator')
    _GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
        args)
# def _build_tokenizer(args):
# """Initialize tokenizer."""
# global _GLOBAL_TOKENIZER
# _ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
# _GLOBAL_TOKENIZER = build_tokenizer(args)
# return _GLOBAL_TOKENIZER
# def rebuild_tokenizer(args):
# global _GLOBAL_TOKENIZER
# _GLOBAL_TOKENIZER = None
# return _build_tokenizer(args)
def _set_tensorboard_writer(args):
    """Set tensorboard writer."""
    global _GLOBAL_TENSORBOARD_WRITER
    _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER,
                                   'tensorboard writer')
    # Only the last rank writes TensorBoard logs, and only when a log
    # directory was requested on the command line.
    wants_tensorboard = getattr(args, 'tensorboard_dir', None)
    is_writer_rank = args.rank == (args.world_size - 1)
    if not (wants_tensorboard and is_writer_rank):
        return
    try:
        from torch.utils.tensorboard import SummaryWriter
        print('> setting tensorboard ...')
        _GLOBAL_TENSORBOARD_WRITER = SummaryWriter(
            log_dir=args.tensorboard_dir,
            max_queue=args.tensorboard_queue_size)
    except ModuleNotFoundError:
        print('WARNING: TensorBoard writing requested but is not '
              'available (are you using PyTorch 1.1.0 or later?), '
              'no TensorBoard logs will be written.', flush=True)
def _set_adlr_autoresume(args):
    """Initialize ADLR autoresume."""
    global _GLOBAL_ADLR_AUTORESUME
    _ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')
    if not args.adlr_autoresume:
        return
    if args.rank == 0:
        print('enabling autoresume ...', flush=True)
    # userlib is expected under the submit-scripts directory if provided.
    sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))
    try:
        from userlib.auto_resume import AutoResume
    except BaseException:
        print('ADLR autoresume is not available, exiting ...')
        sys.exit()
    _GLOBAL_ADLR_AUTORESUME = AutoResume
def _set_timers():
    """Initialize timers."""
    # Module-level Timers singleton; the assert guarantees this runs once.
    global _GLOBAL_TIMERS
    _ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')
    _GLOBAL_TIMERS = Timers()
def _ensure_var_is_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is not None, '{} is not initialized.'.format(name)
def _ensure_var_is_not_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is None, '{} is already initialized.'.format(name)
class _Timer:
    """Timer.

    Accumulates wall-clock seconds across start/stop pairs. CUDA is
    synchronized around each timestamp so in-flight GPU work does not
    skew the measurement.
    """
    def __init__(self, name):
        # Trailing underscores follow the original Megatron naming style.
        self.name_ = name
        self.elapsed_ = 0.0      # accumulated seconds over all start/stop pairs
        self.started_ = False
        self.start_time = time.time()
    def start(self):
        """Start the timer."""
        assert not self.started_, 'timer has already been started'
        # Wait for outstanding CUDA work so the timestamp is meaningful.
        torch.cuda.synchronize()
        self.start_time = time.time()
        self.started_ = True
    def stop(self):
        """Stop the timer."""
        assert self.started_, 'timer is not started'
        torch.cuda.synchronize()
        self.elapsed_ += (time.time() - self.start_time)
        self.started_ = False
    def reset(self):
        """Reset timer."""
        self.elapsed_ = 0.0
        self.started_ = False
    def elapsed(self, reset=True):
        """Calculate the elapsed time.

        If the timer is running it is stopped, read, optionally reset,
        and then restarted, so a running timer keeps running.
        """
        started_ = self.started_
        # If the timing in progress, end it first.
        if self.started_:
            self.stop()
        # Get the elapsed time.
        elapsed_ = self.elapsed_
        # Reset the elapsed time
        if reset:
            self.reset()
        # If timing was in progress, set it back.
        if started_:
            self.start()
        return elapsed_
class Timers:
    """Group of timers."""
    def __init__(self):
        # name -> _Timer; entries are created lazily in __call__.
        self.timers = {}
    def __call__(self, name):
        # Lazily create and return the named timer.
        if name not in self.timers:
            self.timers[name] = _Timer(name)
        return self.timers[name]
    def write(self, names, writer, iteration, normalizer=1.0, reset=False):
        """Write timers to a tensorboard writer"""
        # currently when using add_scalars,
        # torch.utils.add_scalars makes each timer its own run, which
        # pollutes the runs list, so we just add each as a scalar
        assert normalizer > 0.0
        for name in names:
            value = self.timers[name].elapsed(reset=reset) / normalizer
            writer.add_scalar(name + '-time', value, iteration)
    def log(self, names, normalizer=1.0, reset=True):
        """Log a group of timers."""
        assert normalizer > 0.0
        string = 'time (ms)'
        for name in names:
            elapsed_time = self.timers[name].elapsed(
                reset=reset) * 1000.0 / normalizer
            string += ' | {}: {:.2f}'.format(name, elapsed_time)
        # When torch.distributed is up, only the last rank prints, so the
        # line appears once per job instead of once per rank.
        if torch.distributed.is_initialized():
            if torch.distributed.get_rank() == (
                    torch.distributed.get_world_size() - 1):
                print(string, flush=True)
        else:
            print(string, flush=True)
|
apex-master
|
apex/transformer/testing/global_vars.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import torch
from apex.transformer import parallel_state
class GradScaler(torch.cuda.amp.GradScaler):
    """
    Gradient scaler for model-parallel inf check. The inf in gradients are checked across tensor-parallel
    ranks in (1) executing optimizer step and (2) gradient scaler update.
    """
    def __init__(
        self, init_scale=2.0 ** 16, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True
    ):
        # Defaults mirror torch.cuda.amp.GradScaler; only the inf/nan
        # reduction behavior in the overrides below differs from the parent.
        super().__init__(
            init_scale=init_scale,
            growth_factor=growth_factor,
            backoff_factor=backoff_factor,
            growth_interval=growth_interval,
            enabled=enabled,
        )
    def _unscale_grads_(self, optimizer, *args):
        # Optimizers may opt in to custom unscaling by setting the
        # `_custom_amp_unscale_grads` attribute; otherwise defer to parent.
        if getattr(optimizer, "_custom_amp_unscale_grads", False):
            return optimizer.unscale_grads(*args)
        else:
            return super()._unscale_grads_(optimizer, *args)
    def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
        retval = None
        # Collapse this rank's per-device inf counters into one scalar tensor.
        found_inf = torch.cuda.FloatTensor([sum(v.item() for v in optimizer_state["found_inf_per_device"].values())])
        # Update across all model parallel instances.
        torch.distributed.all_reduce(
            found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
        )
        # Step only when no rank in the model-parallel group saw an inf/nan.
        if found_inf.item() == 0:
            retval = optimizer.step(*args, **kwargs)
        return retval
    def update(self, new_scale=None):
        """
        Updates the scale factor.
        If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
        to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
        the scale is multiplied by ``growth_factor`` to increase it.
        Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
        used directly, it's used to fill GradScaler's internal scale tensor. So if
        ``new_scale`` was a tensor, later in-place changes to that tensor will not further
        affect the scale GradScaler uses internally.)
        Args:
            new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None):  New scale factor.
        .. warning::
            :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
            been invoked for all optimizers used this iteration.
        """
        if not self._enabled:
            return
        _scale, _growth_tracker = self._check_scale_growth_tracker("update")
        if new_scale is not None:
            # Accept a new user-defined scale.
            if isinstance(new_scale, float):
                self._scale.fill_(new_scale)  # type: ignore[union-attr]
            else:
                reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
                assert isinstance(new_scale, torch.cuda.FloatTensor), reason  # type: ignore[attr-defined]
                assert new_scale.numel() == 1, reason
                assert new_scale.requires_grad is False, reason
                self._scale.copy_(new_scale)  # type: ignore[union-attr]
        else:
            # Consume shared inf/nan data collected from optimizers to update the scale.
            # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
            found_infs = [
                found_inf.to(device=_scale.device, non_blocking=True)
                for state in self._per_optimizer_states.values()
                for found_inf in state["found_inf_per_device"].values()
            ]
            assert len(found_infs) > 0, "No inf checks were recorded prior to update."
            found_inf_combined = found_infs[0]
            # Update across all model parallel instances.
            torch.distributed.all_reduce(
                found_inf_combined, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
            )
            if len(found_infs) > 1:
                for i in range(1, len(found_infs)):
                    found_inf = found_infs[i]
                    # Update across all model parallel instances.
                    torch.distributed.all_reduce(
                        found_inf, op=torch.distributed.ReduceOp.MAX, group=parallel_state.get_model_parallel_group()
                    )
                    found_inf_combined += found_inf
            # Native fused update: grows/backs off the scale based on the
            # combined inf indicator and the growth tracker.
            torch._amp_update_scale_(
                _scale,
                _growth_tracker,
                found_inf_combined,
                self._growth_factor,
                self._backoff_factor,
                self._growth_interval,
            )
        # To prepare for next iteration, clear the data collected from optimizers this iteration.
        self._per_optimizer_states = defaultdict(torch.cuda.amp.grad_scaler._refresh_per_optimizer_state)
|
apex-master
|
apex/transformer/amp/grad_scaler.py
|
from apex.transformer.amp.grad_scaler import GradScaler
__all__ = [
"GradScaler",
]
|
apex-master
|
apex/transformer/amp/__init__.py
|
from apex.transformer._data._batchsampler import MegatronPretrainingRandomSampler
from apex.transformer._data._batchsampler import MegatronPretrainingSampler
__all__ = [
"MegatronPretrainingRandomSampler",
"MegatronPretrainingSampler",
]
|
apex-master
|
apex/transformer/_data/__init__.py
|
"""BatchSampler implementations for POC of dynamic batch size or rampup_batch_size support.
Implementations are based on https://github.com/NVIDIA/Megatron-LM/blob/bcd605f8570ebeeb0436c115ebbfafc3c5a40ae5/megatron/data/data_samplers.py.
""" # NOQA
import abc
import torch
__all__ = [
"MegatronPretrainingSampler",
"MegatronPretrainingRandomSampler",
]
class _Base:
"""Base class for Megatron style BatchSampler."""
@abc.abstractmethod
def __len__(self) -> int:
...
@abc.abstractmethod
def __iter__(self):
...
@property
@abc.abstractmethod
def local_minibatch_size(self) -> int:
...
@local_minibatch_size.setter
@abc.abstractclassmethod
def local_minibatch_size(self) -> None:
...
class MegatronPretrainingSampler(_Base):
    """Megatron style sequential Batch Sampler.

    Walks the dataset from ``consumed_samples`` and yields, for each
    global batch of ``local_minibatch_size * data_parallel_size`` indices,
    the contiguous slice owned by ``data_parallel_rank``.
    """
    def __init__(
        self,
        total_samples: int,
        consumed_samples: int,
        local_minibatch_size: int,
        data_parallel_rank: int,
        data_parallel_size: int,
        drop_last: bool = True,
    ):
        # Sanity checks. Fixed: error messages previously referenced
        # self.* attributes before they were assigned, so every validation
        # failure raised AttributeError instead of the intended RuntimeError.
        if total_samples <= 0:
            raise RuntimeError('no sample to consume: {}'.format(total_samples))
        if consumed_samples >= total_samples:
            raise RuntimeError('no samples left to consume: {}, {}'.format(consumed_samples, total_samples))
        if local_minibatch_size <= 0:
            raise RuntimeError(f"local minibatch size must be greater than 0: {local_minibatch_size}")
        if data_parallel_size <= 0:
            raise RuntimeError(f"data parallel size must be greater than 0: {data_parallel_size}")
        if data_parallel_rank >= data_parallel_size:
            raise RuntimeError('data_parallel_rank should be smaller than data size: {}, {}'.format(data_parallel_rank, data_parallel_size))
        # Keep a copy of input params for later use.
        self.total_samples = total_samples
        self.consumed_samples = consumed_samples
        self._local_minibatch_size = local_minibatch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * data_parallel_size
        self.drop_last = drop_last
    def __len__(self):
        return self.total_samples
    def get_start_end_idx(self):
        # Slice of the assembled global batch owned by this rank.
        start_idx = self.data_parallel_rank * self.local_minibatch_size
        end_idx = start_idx + self.local_minibatch_size
        return start_idx, end_idx
    @property
    def local_minibatch_size(self) -> int:
        return self._local_minibatch_size
    @local_minibatch_size.setter
    def local_minibatch_size(self, new_local_minibatch_size) -> None:
        self._local_minibatch_size = new_local_minibatch_size
        self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
    def __iter__(self):
        batch = []
        # Last batch will be dropped if drop_last is not set False
        for idx in range(self.consumed_samples, self.total_samples):
            batch.append(idx)
            # Fixed: accumulate a full global batch before slicing. The old
            # code compared against local_minibatch_size, so for
            # data_parallel_size > 1 the rank slice [rank*lmbs:(rank+1)*lmbs]
            # was empty or wrong for every rank > 0.
            if len(batch) == self.local_minibatch_times_data_parallel_size:
                start_idx, end_idx = self.get_start_end_idx()
                yield batch[start_idx:end_idx]
                batch = []
        # Check the last partial batch and see drop_last is set
        if len(batch) > 0 and not self.drop_last:
            start_idx, end_idx = self.get_start_end_idx()
            yield batch[start_idx:end_idx]
class MegatronPretrainingRandomSampler(_Base):
    """Megatron style Random Batch Sampler.
    Major difference is that `__iter__` yields a local minibatch, not a microbatch.
    A local minibatch consists of `global_batch_size / data_parallel_size`
    Args:
        total_samples: The number of data samples, i.e. ``len(dataset)``.
        consumed_samples: The number of samples already consumed in pretraining.
        local_minibatch_size: The number of data in each batch returned from `__iter__`. Basically
            `local_minibatch_size = global_batch_size / data_parallel_size`.
        data_parallel_rank: Rank of this sampler within the data-parallel group.
        data_parallel_size: Number of data-parallel ranks.
    """
    def __init__(
        self,
        total_samples: int,
        consumed_samples: int,
        local_minibatch_size: int,
        data_parallel_rank: int,
        data_parallel_size: int,
    ) -> None:
        if total_samples <= 0:
            raise ValueError(f"no sample to consume: total_samples of {total_samples}")
        if local_minibatch_size <= 0:
            raise ValueError(f"Invalid local_minibatch_size: {local_minibatch_size}")
        if data_parallel_size <= 0:
            raise ValueError(f"Invalid data_parallel_size: {data_parallel_size}")
        if data_parallel_rank >= data_parallel_size:
            raise ValueError(
                f"data_parallel_rank should be smaller than data parallel size: {data_parallel_rank} < {data_parallel_size}"
            )
        # Keep a copy of input params for later use.
        self.total_samples = total_samples
        self.consumed_samples = consumed_samples
        self._local_minibatch_size = local_minibatch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
        # Size of the trailing partial global batch (0 if the dataset divides
        # evenly); excluded from epoch arithmetic in __iter__.
        self.last_batch_size = self.total_samples % self.local_minibatch_times_data_parallel_size
    def __len__(self) -> int:
        return self.total_samples
    @property
    def local_minibatch_size(self) -> int:
        return self._local_minibatch_size
    @local_minibatch_size.setter
    def local_minibatch_size(self, new_local_minibatch_size) -> None:
        self._local_minibatch_size = new_local_minibatch_size
        self.local_minibatch_times_data_parallel_size = self._local_minibatch_size * self.data_parallel_size
    def __iter__(self):
        # Epoch index and position within the epoch are derived from
        # consumed_samples so shuffling is deterministic across restarts.
        active_total_samples = self.total_samples - self.last_batch_size
        self.epoch = self.consumed_samples // active_total_samples
        current_epoch_samples = self.consumed_samples % active_total_samples
        # note(mkozuki): might be better to uncomment
        # assert current_epoch_samples % (self.data_parallel_size * apex.transformer.pipeline_parallel.utils.get_micro_batch_size()) == 0
        # data sharding and random sampling
        # Each rank owns a contiguous "bucket" of indices and shuffles only
        # within it, seeded by the epoch for reproducibility.
        bucket_size = (self.total_samples // self.local_minibatch_times_data_parallel_size) * self.local_minibatch_size
        bucket_offset = current_epoch_samples // self.data_parallel_size
        start_idx = self.data_parallel_rank * bucket_size
        g = torch.Generator()
        g.manual_seed(self.epoch)
        random_idx = torch.randperm(bucket_size, generator=g).tolist()
        idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
        batch = []
        # Last batch if not complete will be dropped.
        for idx in idx_range:
            batch.append(idx)
            if len(batch) == self.local_minibatch_size:
                # Note: mutates consumed_samples while iterating.
                self.consumed_samples += self.local_minibatch_times_data_parallel_size
                yield batch
                batch = []
|
apex-master
|
apex/transformer/_data/_batchsampler.py
|
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.enums import AttnMaskType
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
    """
    Fused operation which performs following three operations in sequence
    1. Scale the tensor.
    2. Apply upper triangular mask (typically used in gpt models).
    3. Perform softmax.
    """
    @staticmethod
    def forward(ctx, inputs, scale):
        # Imported lazily so the module loads even when the compiled CUDA
        # extension is unavailable; it is required only when the op runs.
        import scaled_upper_triang_masked_softmax_cuda
        # The kernel takes the scale as a tensor element, not a Python float.
        scale_t = torch.tensor([scale])
        softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
            inputs, scale_t[0]
        )
        # Softmax output (not the input) is what backward needs.
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results
    @staticmethod
    def backward(ctx, output_grads):
        import scaled_upper_triang_masked_softmax_cuda
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
            output_grads, softmax_results, scale_t[0]
        )
        # One gradient per forward input (inputs, scale); scale gets None.
        return input_grads, None
def scaled_upper_triang_masked_softmax(inputs, _, scale):
    """Apply the fused causal (upper-triangular) masked softmax.

    ``inputs`` is a 4D ``(b, np, sq, sk)`` tensor; the unused second
    argument keeps the signature uniform with the other fused variants.
    """
    batch, heads, sq, sk = inputs.size()
    assert sq == sk, "causal mask is only for self attention"
    # The kernel expects a 3D (attn_batches, sq, sk) view.
    flat_inputs = inputs.view(-1, sq, sk)
    fn_args = _cast_if_autocast_enabled(flat_inputs, scale)
    with torch.cuda.amp.autocast(enabled=False):
        probs = ScaledUpperTriangMaskedSoftmax.apply(*fn_args)
    return probs.view(batch, heads, sq, sk)
# NOTE (mkozuki): `ScaledMaskedSoftmax` somehow doesn't work well with `torch.cuda.amp.custom_fwd`.
# Without `cast_inputs` kwarg, somehow inputs are not cast to dtype used in the autocast context.
# So I needed to manually write two `torch.autograd.Function` inheritances.
# Fused operation which performs following three operations in sequence
# 1. Scale the tensor.
# 2. Apply the mask.
# 3. Perform softmax.
class ScaledMaskedSoftmax(torch.autograd.Function):
    """Fused scale + explicit mask + softmax (CUDA extension backed)."""
    @staticmethod
    def forward(ctx, inputs, mask, scale):
        # Lazy import: the compiled extension is only needed when used.
        import scaled_masked_softmax_cuda
        scale_t = torch.tensor([scale])
        softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results
    @staticmethod
    def backward(ctx, output_grads):
        import scaled_masked_softmax_cuda
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_masked_softmax_cuda.backward(
            output_grads, softmax_results, scale_t[0]
        )
        # Gradients for (inputs, mask, scale); mask and scale get None.
        return input_grads, None, None
def scaled_masked_softmax(inputs, mask, scale):
    """Fused scale+softmax on a 4D (b, np, sq, sk) tensor.

    Dispatches to the masked kernel when ``mask`` is given, otherwise to
    the unmasked one.
    """
    if mask is None:
        fn = ScaledSoftmax
        fn_args = _cast_if_autocast_enabled(inputs, scale)
    else:
        fn = ScaledMaskedSoftmax
        fn_args = _cast_if_autocast_enabled(inputs, mask, scale)
    # Run outside autocast: dtypes were already resolved above.
    with torch.cuda.amp.autocast(enabled=False):
        return fn.apply(*fn_args)
class GenericScaledMaskedSoftmax(torch.autograd.Function):
    """Fused scale + mask + softmax without sequence-length restrictions.

    Backed by the ``generic_scaled_masked_softmax_cuda`` extension, which
    is imported lazily so the module loads without it.
    """
    @staticmethod
    def forward(ctx, inputs, mask, scale):
        import generic_scaled_masked_softmax_cuda
        scale_t = torch.tensor([scale])
        softmax_results = generic_scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results
    @staticmethod
    def backward(ctx, output_grads):
        # Fixed: this previously imported `generic_scaled_masked_softmax_cuda_new`
        # while calling `generic_scaled_masked_softmax_cuda` below, which
        # raised NameError the first time backward ran.
        import generic_scaled_masked_softmax_cuda
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = generic_scaled_masked_softmax_cuda.backward(output_grads, softmax_results, scale_t[0])
        # Gradients for (inputs, mask, scale); mask and scale get None.
        return input_grads, None, None
def generic_scaled_masked_softmax(inputs, mask, scale):
    """Run GenericScaledMaskedSoftmax on a 4D (b, np, sq, sk) tensor."""
    fn_args = _cast_if_autocast_enabled(inputs, mask, scale)
    # Autocast is disabled because dtypes were already resolved above.
    with torch.cuda.amp.autocast(enabled=False):
        probs = GenericScaledMaskedSoftmax.apply(*fn_args)
    return probs
class ScaledSoftmax(torch.autograd.Function):
    """
    Fused operation which performs following two operations in sequence
    1. Scale the tensor.
    2. Perform softmax.
    """
    @staticmethod
    def forward(ctx, inputs, scale):
        # Lazy import: the compiled extension is only needed when used.
        import scaled_softmax_cuda
        scale_t = torch.tensor([scale])
        softmax_results = scaled_softmax_cuda.forward(
            inputs, scale_t[0]
        )
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results
    @staticmethod
    def backward(ctx, output_grads):
        import scaled_softmax_cuda
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_softmax_cuda.backward(
            output_grads, softmax_results, scale_t[0]
        )
        # NOTE(review): three gradients are returned although forward takes
        # two inputs (inputs, scale); autograd ignores the extra None.
        return input_grads, None, None
class FusedScaleMaskSoftmax(torch.nn.Module):
    """
    fused operation: scaling + mask + softmax
    Arguments:
        input_in_fp16: flag to indicate if input in fp16 data format.
        input_in_bf16: flag to indicate if input in bf16 data format.
        attn_mask_type: attention mask type (pad or causal)
        scaled_masked_softmax_fusion: flag to indicate user want to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax in performed at fp32 precision.
        scale: scaling factor used in input tensor scaling.
    """
    def __init__(
        self,
        input_in_fp16,
        input_in_bf16,
        attn_mask_type,
        scaled_masked_softmax_fusion,
        mask_func,
        softmax_in_fp32,
        scale,
    ):
        super().__init__()
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        if self.input_in_fp16 and self.input_in_bf16:
            raise RuntimeError(
                "both fp16 and bf16 flags cannot be active at the same time."
            )
        self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
        self.attn_mask_type = attn_mask_type
        self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
        self.mask_func = mask_func
        self.softmax_in_fp32 = softmax_in_fp32
        self.scale = scale
        if not (self.scale is None or softmax_in_fp32):
            raise RuntimeError("softmax should be in fp32 when scaled")
        # NOTE(review): fused_softmax_func is only defined when fusion is
        # requested; forward_fused_softmax would raise AttributeError
        # otherwise, but is_kernel_available() gates that path.
        if self.scaled_masked_softmax_fusion:
            if self.attn_mask_type == AttnMaskType.causal:
                self.fused_softmax_func = scaled_upper_triang_masked_softmax
            elif self.attn_mask_type == AttnMaskType.padding:
                self.fused_softmax_func = scaled_masked_softmax
            else:
                raise ValueError("Invalid attn_mask_type.")
    def forward(self, input, mask):
        # [b, np, sq, sk]
        assert input.dim() == 4
        # Use the fused CUDA kernel when its shape/dtype constraints hold;
        # fall back to the plain torch implementation otherwise.
        if self.is_kernel_available(mask, *input.size()):
            return self.forward_fused_softmax(input, mask)
        else:
            return self.forward_torch_softmax(input, mask)
    def is_kernel_available(self, mask, b, np, sq, sk):
        """Return True when the fused kernel supports this input shape."""
        attn_batches = b * np
        if (
            self.scaled_masked_softmax_fusion  # user want to fuse
            and self.input_in_float16  # input must be fp16
            and (
                self.attn_mask_type == AttnMaskType.causal
                or self.attn_mask_type == AttnMaskType.padding
            )
            and 16 < sk <= 16384  # sk must be 16 ~ 16384
            and sq % 4 == 0  # sq must be divisor of 4
            and sk % 4 == 0  # sk must be divisor of 4
            and attn_batches % 4 == 0  # np * b must be divisor of 4
        ):
            # NOTE(review): this inner range check is redundant — the outer
            # condition already guarantees 16 < sk <= 16384.
            if 0 <= sk <= 16384:
                batch_per_block = self.get_batch_per_block(sq, sk, b, np)
                if self.attn_mask_type == AttnMaskType.causal:
                    if attn_batches % batch_per_block == 0:
                        return True
                else:
                    if sq % batch_per_block == 0:
                        return True
        return False
    def forward_fused_softmax(self, input, mask):
        # input.shape = [b, np, sq, sk]
        scale = self.scale if self.scale is not None else 1.0
        return self.fused_softmax_func(input, mask, scale)
    def forward_torch_softmax(self, input, mask):
        # Optionally compute in fp32 for stability, then cast back to the
        # original float16 flavor at the end.
        if self.input_in_float16 and self.softmax_in_fp32:
            input = input.float()
        if self.scale is not None:
            input = input * self.scale
        mask_output = self.mask_func(input, mask) if mask is not None else input
        probs = torch.nn.Softmax(dim=-1)(mask_output)
        if self.input_in_float16 and self.softmax_in_fp32:
            if self.input_in_fp16:
                probs = probs.half()
            else:
                probs = probs.bfloat16()
        return probs
    @staticmethod
    def get_batch_per_block(sq, sk, b, np):
        # Kernel-side occupancy query from the compiled extension.
        import scaled_masked_softmax_cuda
        return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np)
class GenericFusedScaleMaskSoftmax(FusedScaleMaskSoftmax):
    """
    Generic version of FusedScaleMaskSoftmax.
    It removes the seq-len limitations and has slight performance degradation compared with FusedScaleMaskSoftmax
    fused operation: scaling + mask + softmax
    Arguments:
        input_in_fp16: flag to indicate if input in fp16 data format.
        input_in_bf16: flag to indicate if input in bf16 data format.
        scaled_masked_softmax_fusion: flag to indicate user want to use softmax fusion
        mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax in performed at fp32 precision.
        scale: scaling factor used in input tensor scaling.
    """
    def __init__(
        self, input_in_fp16, input_in_bf16, scaled_masked_softmax_fusion, mask_func, softmax_in_fp32, scale,
    ):
        super().__init__(input_in_fp16, input_in_bf16, AttnMaskType.padding, scaled_masked_softmax_fusion, mask_func, softmax_in_fp32, scale)
        # NOTE(review): this overwrites the boolean fusion *flag* set by the
        # parent with the generic fusion *function*, while the parent's
        # forward_fused_softmax still calls self.fused_softmax_func — confirm
        # the intended dispatch before relying on the generic kernel here.
        self.scaled_masked_softmax_fusion = generic_scaled_masked_softmax
    def is_kernel_available(self, mask, b, np, sq, sk):
        # The generic kernel has no shape restrictions beyond sk > 0.
        if self.scaled_masked_softmax_fusion and 0 < sk:  # user want to fuse  # sk must be 1 ~
            return True
        return False
|
apex-master
|
apex/transformer/functional/fused_softmax.py
|
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
__all__ = [
"FusedScaleMaskSoftmax",
]
|
apex-master
|
apex/transformer/functional/__init__.py
|
from .fp16util import (
BN_convert_float,
network_to_half,
prep_param_lists,
model_grads_to_master_grads,
master_params_to_model_params,
tofp16,
to_python_float,
clip_grad_norm,
convert_module,
convert_network,
FP16Model,
)
from .fp16_optimizer import FP16_Optimizer
from .loss_scaler import LossScaler, DynamicLossScaler
|
apex-master
|
apex/fp16_utils/__init__.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
    """
    Utility module that implements::
        def forward(self, input):
            return input.half()
    """
    def __init__(self):
        super().__init__()
    def forward(self, input):
        # Cast the incoming activation tensor to fp16.
        return input.half()
def BN_convert_float(module):
    """Recursively cast affine batch-norm layers back to fp32.

    Utility function for network_to_half(). Retained for legacy purposes.
    """
    is_affine_bn = (
        isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
        and module.affine is True
    )
    if is_affine_bn:
        # Batchnorm with learnable params stays in fp32; other modules
        # are left untouched.
        module.float()
    for submodule in module.children():
        BN_convert_float(submodule)
    return module
def network_to_half(network):
    """
    Convert model to half precision in a batchnorm-safe way.
    Retained for legacy purposes. It is recommended to use FP16Model.
    """
    # Cast everything to fp16, restore affine batchnorms to fp32, and
    # prepend a stage that casts incoming activations to fp16.
    half_network = BN_convert_float(network.half())
    return nn.Sequential(tofp16(), half_network)
def convert_module(module, dtype):
    """
    Converts a module's immediate parameters and buffers to dtype.

    Only floating-point tensors are converted; integer buffers (e.g.
    batchnorm's ``num_batches_tracked``) are left untouched.
    """
    for p in module.parameters(recurse=False):
        if p is None:
            continue
        if p.data.dtype.is_floating_point:
            p.data = p.data.to(dtype=dtype)
        grad = p._grad
        if grad is not None and grad.data.dtype.is_floating_point:
            grad.data = grad.data.to(dtype=dtype)
    for buffer in module.buffers(recurse=False):
        if buffer is not None and buffer.data.dtype.is_floating_point:
            buffer.data = buffer.data.to(dtype=dtype)
def convert_network(network, dtype):
    """
    Converts a network's parameters and buffers to dtype.
    """
    for submodule in network.modules():
        # Affine batchnorm layers are skipped (kept in fp32).
        if isinstance(submodule, torch.nn.modules.batchnorm._BatchNorm) and submodule.affine is True:
            continue
        convert_module(submodule, dtype)
        # RNN modules cache flattened weights; re-flatten after the cast.
        if isinstance(submodule, (torch.nn.RNNBase, torch.nn.modules.rnn.RNNBase)):
            submodule.flatten_parameters()
    return network
class FP16Model(nn.Module):
    """
    Convert model to half precision in a batchnorm-safe way.
    """
    def __init__(self, network):
        from apex import deprecated_warning
        deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
        super().__init__()
        # Parameters/buffers are cast here; activations are cast in forward.
        self.network = convert_network(network, dtype=torch.half)
    def forward(self, *inputs):
        half_inputs = tuple(tensor.half() for tensor in inputs)
        return self.network(*half_inputs)
def backwards_debug_hook(grad):
    """Hook for master params that should never receive gradients directly.

    FP16 model gradients are copied to the FP32 master params explicitly;
    a gradient arriving here via autograd indicates a wiring bug.

    Raises:
        RuntimeError: always.
    """
    # Fixed typo in the message: "recieved" -> "received".
    raise RuntimeError("master_params received a gradient in the backward pass!")
def prep_param_lists(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.
    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.
    Example::
        model_params, master_params = prep_param_lists(model)
    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    # Only trainable parameters participate in the fp16/fp32 mirroring.
    model_params = [param for param in model.parameters() if param.requires_grad]
    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except Exception:
            # Fixed: narrowed the bare `except:` and corrected the class
            # name typo in the message ("F16_Optimizer").
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # master_params.register_hook(backwards_debug_hook)
        if master_params.grad is None:
            # Pre-allocate grad storage (contents are overwritten by
            # model_grads_to_master_grads before use).
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        # One detached fp32 leaf copy per model parameter.
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy model gradients to master gradients.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
    """
    if flat_master:
        # The flattening may incur one more deep copy than is necessary.
        flat_grads = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat_grads)
        return
    for model, master in zip(model_params, master_params):
        if model.grad is None:
            master.grad = None
            continue
        if master.grad is None:
            # Allocate master grad storage lazily on first copy.
            master.grad = Variable(master.data.new(*master.data.size()))
        master.grad.data.copy_(model.grad.data)
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy master parameters to model parameters.
    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
    """
    if flat_master:
        # Unflatten the single master tensor back into per-param views.
        unflattened = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model, master in zip(model_params, unflattened):
            model.data.copy_(master)
    else:
        for model, master in zip(model_params, master_params):
            model.data.copy_(master.data)
# Backward compatibility fixes
def to_python_float(t):
    """Return ``t`` as a Python scalar.

    Backward-compat helper: uses ``.item()`` when available, otherwise
    falls back to indexing (old PyTorch tensors lacked ``.item()``).
    """
    if not hasattr(t, 'item'):
        return t[0]
    return t.item()
# Pick the correct clip-grad API name for the installed torch version:
# very old torch (<= 0.4) exposes the non-underscore name only.
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
    clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
    clip_grad_norm = torch.nn.utils.clip_grad_norm_
|
apex-master
|
apex/fp16_utils/fp16util.py
|
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp._amp_state import _amp_state, maybe_print
from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor_applier
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
# TODO: Update overflow check + downscale to use Carl's fused kernel.
class FP16_Optimizer(object):
    """
    Deprecated optimizer wrapper that maintains FP32 master copies of a wrapped
    optimizer's FP16 parameters and applies (static or dynamic) loss scaling.

    The wrapped ``init_optimizer``'s param groups are rewritten in place so that
    it steps on the FP32 master weights; :attr:`step` then copies the updated
    master weights back into the original FP16 model parameters.

    Args:
        init_optimizer: Existing optimizer whose params may be fp16 and/or fp32.
        static_loss_scale (float): Loss scale used when ``dynamic_loss_scale`` is False.
        dynamic_loss_scale (bool): Use dynamic loss scaling instead of the static scale.
        dynamic_loss_args (dict, optional): Extra kwargs forwarded to the dynamic ``LossScaler``.
        verbose (bool): Print per-param-group processing info.
    """
    def __init__(self,
                 init_optimizer,
                 static_loss_scale=1.0,
                 dynamic_loss_scale=False,
                 dynamic_loss_args=None,
                 verbose=True):
        print("Warning: FP16_Optimizer is deprecated and dangerous, and will be deleted soon. "
              "If it still works, you're probably getting lucky. "
              "For mixed precision, use the documented API https://nvidia.github.io/apex/amp.html, with opt_level=O1.")
        from apex import deprecated_warning
        deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
        # Bug fix: the original tested the *function object* ``torch.cuda.is_available``
        # (always truthy), so this guard could never fire. Call it instead.
        if not torch.cuda.is_available():
            raise SystemError("Cannot use fp16 without CUDA.")
        self.verbose = verbose
        self.optimizer = init_optimizer
        # init_state_dict sets up an alternative way to cast per-param state tensors.
        # Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary.
        # init_state_dict = init_optimizer.state_dict()

        # Per param group: fp16 model params, their fp32 master copies, and
        # params that were already fp32 (which act as their own masters).
        self.fp16_groups = []
        self.fp32_from_fp16_groups = []
        self.fp32_from_fp32_groups = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            self.maybe_print("FP16_Optimizer processing param group {}:".format(i))
            fp16_params_this_group = []
            fp32_params_this_group = []
            fp32_from_fp16_params_this_group = []
            # NOTE: the inner index gets its own name; the original shadowed the
            # outer loop variable ``i``.
            for j, param in enumerate(param_group['params']):
                if param.requires_grad:
                    if param.type() == 'torch.cuda.HalfTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
                                         .format(param.size()))
                        fp16_params_this_group.append(param)
                        master_param = param.detach().clone().float()
                        master_param.requires_grad = True
                        param_group['params'][j] = master_param
                        fp32_from_fp16_params_this_group.append(master_param)
                        # Reset existing state dict key to the new master param.
                        # We still need to recast per-param state tensors, if any, to FP32.
                        if param in self.optimizer.state:
                            self.optimizer.state[master_param] = self.optimizer.state.pop(param)
                    elif param.type() == 'torch.cuda.FloatTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
                                         .format(param.size()))
                        fp32_params_this_group.append(param)
                        param_group['params'][j] = param
                    else:
                        raise TypeError("Wrapped parameters must be either "
                                        "torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
                                        "Received {}".format(param.type()))
            self.fp16_groups.append(fp16_params_this_group)
            self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
            self.fp32_from_fp32_groups.append(fp32_params_this_group)
        # Flattened (cross-group) views used by the fused multi-tensor kernels.
        self.all_fp16_params = []
        for group in self.fp16_groups:
            self.all_fp16_params += group
        self.all_fp32_from_fp16_params = []
        for group in self.fp32_from_fp16_groups:
            self.all_fp32_from_fp16_params += group
        self.all_fp32_from_fp32_params = []
        for group in self.fp32_from_fp32_groups:
            self.all_fp32_from_fp32_params += group
        # Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors
        self.optimizer.load_state_dict(self.optimizer.state_dict())
        # alternative way to cast per-param state tensors:
        # self.optimizer.load_state_dict(init_state_dict)
        if dynamic_loss_scale:
            self.dynamic_loss_scale = True
            if dynamic_loss_args is not None:
                self.loss_scaler = LossScaler("dynamic", **dynamic_loss_args)
            else:
                self.loss_scaler = LossScaler("dynamic")
        else:
            self.dynamic_loss_scale = False
            self.loss_scaler = LossScaler(static_loss_scale)
        self.overflow = False
        self.first_closure_call_this_step = True
        self.clip_grad_norm = clip_grad_norm
        # TODO: Centralize exposure and import error checking for the C backend.
        if multi_tensor_applier.available:
            import amp_C
            self.multi_tensor_scale = amp_C.multi_tensor_scale
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])

    # Having self.maybe_print distinct from _amp_state.maybe_print is another artifact
    # of having to support FP16_Optimizer separately, for the time being.
    def maybe_print(self, msg):
        """Print ``msg`` only when the optimizer was constructed with ``verbose=True``."""
        if self.verbose:
            print(msg)

    def __getstate__(self):
        raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")

    def __setstate__(self, state):
        raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")

    def zero_grad(self, set_grads_to_None=False):
        """
        Zero fp32 and fp16 parameter grads.
        """
        # In principle, only the .grad attributes of the model params need to be zeroed,
        # because gradients are copied into the FP32 master params. However, we zero
        # all gradients owned by the optimizer, just to be safe:
        for group in self.optimizer.param_groups:
            for p in group['params']:
                if set_grads_to_None:
                    p.grad = None
                else:
                    if p.grad is not None:
                        p.grad.detach_()
                        p.grad.zero_()
        # Zero fp16 gradients owned by the model:
        for fp16_group in self.fp16_groups:
            for param in fp16_group:
                if set_grads_to_None:
                    param.grad = None
                else:
                    if param.grad is not None:
                        param.grad.detach_()  # as in torch.optim.optimizer.zero_grad()
                        param.grad.zero_()

    def _master_params_to_model_params(self):
        """Copy the fp32 master weights back into the fp16 model weights."""
        if multi_tensor_applier.available:
            if len(self.all_fp16_params) > 0:
                # multi_tensor_scale with a scale of 1.0 is a fused copy.
                multi_tensor_applier(
                    self.multi_tensor_scale,
                    self._dummy_overflow_buf,
                    [self.all_fp32_from_fp16_params, self.all_fp16_params],
                    1.0)
        else:
            for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
                master_params_to_model_params(fp16_group, fp32_from_fp16_group)

    def clip_master_grads(self, max_norm, norm_type=2):
        """
        Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
        Args:
            max_norm (float or int): max norm of the gradients
            norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
                infinity norm.
        Returns:
            Total norm of the current fp32 gradients (viewed as a single vector).
        .. warning::
            Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
        """
        if not self.overflow:
            fp32_params = []
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    fp32_params.append(param)
            return self.clip_grad_norm(fp32_params, max_norm, norm_type)
        else:
            return -1

    def state_dict(self):
        """
        Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
        This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
        of the contained Pytorch optimizer.
        Example::
            checkpoint = {}
            checkpoint['model'] = model.state_dict()
            checkpoint['optimizer'] = optimizer.state_dict()
            torch.save(checkpoint, "saved.pth")
        """
        state_dict = {}
        state_dict['loss_scaler'] = self.loss_scaler
        state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
        state_dict['overflow'] = self.overflow
        state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
        state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
        state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
        return state_dict

    def load_state_dict(self, state_dict):
        """
        Loads a state_dict created by an earlier call to state_dict().
        If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
        whose parameters in turn came from ``model``, it is expected that the user
        will call ``model.load_state_dict()`` before
        ``fp16_optimizer_instance.load_state_dict()`` is called.
        Example::
            model = torch.nn.Linear(D_in, D_out).cuda().half()
            optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
            optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
            ...
            checkpoint = torch.load("saved.pth")
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        """
        # I think it should actually be ok to reload the optimizer before the model.
        self.loss_scaler = state_dict['loss_scaler']
        self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
        self.overflow = state_dict['overflow']
        self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
        self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
        # At this point, the optimizer's references to the model's fp32 parameters are up to date.
        # The optimizer's hyperparameters and internal buffers are also up to date.
        # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
        # out of date. There are two options.
        # 1:  Refresh the master params from the model's fp16 params.
        #     This requires less storage but incurs precision loss.
        # 2:  Save and restore the fp32 master copies separately.
        #     We choose option 2.
        #
        # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
        # of their associated parameters, because it's possible those buffers might not exist yet in
        # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
        # constructed in the same way as the one whose state_dict we are loading, the same master params
        # are guaranteed to exist, so we can just copy_() from the saved master params.
        for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
            for current, saved in zip(current_group, saved_group):
                current.data.copy_(saved.data)

    def step(self, closure=None):  # could add clip option.
        """
        If no closure is supplied, :attr:`step` should be called after
        ``fp16_optimizer_obj.backward(loss)``.
        :attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
        :class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
        originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
        another forward pass using their model.
        If a closure is supplied, :attr:`step` may be called without a prior call to
        :attr:`backward(loss)`.
        This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
        However, the user should take care that any ``loss.backward()`` call within the closure
        has been replaced by ``fp16_optimizer_obj.backward(loss)``.
        Args:
            closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
        Example with closure::
            # optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
            # existing pytorch optimizer.
            for input, target in dataset:
                def closure():
                    optimizer.zero_grad()
                    output = model(input)
                    loss = loss_fn(output, target)
                    # loss.backward() becomes:
                    optimizer.backward(loss)
                    return loss
                optimizer.step(closure)
        .. warning::
            Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
        .. _`ordinary Pytorch optimizer use`:
            http://pytorch.org/docs/master/optim.html#optimizer-step-closure
        """
        # The loss scale itself is updated at the end of backward
        # (update_master_grads); here we only react to the overflow flag.
        if self.overflow:
            # Using _amp_state.maybe_print instead of self.print here is intentional.
            maybe_print("Gradient overflow. Skipping step, reducing " +
                        "loss scale to {}".format(self.loss_scaler.loss_scale()))
            return
        if closure is not None:
            retval = self._step_with_closure(closure)
        else:
            retval = self.optimizer.step()
        self._master_params_to_model_params()
        return retval

    def _step_with_closure(self, closure):
        """Run ``self.optimizer.step`` with a closure, keeping fp16 params fresh
        between repeated closure invocations and retrying on overflow."""
        def wrapped_closure():
            if self.first_closure_call_this_step:
                # We expect that the fp16 params are initially fresh on entering self.step(),
                # so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
                # is called within self.optimizer.step().
                self.first_closure_call_this_step = False
            else:
                # If self.optimizer.step() internally calls wrapped_closure more than once,
                # it may update the fp32 params after each call. However, self.optimizer
                # doesn't know about the fp16 params at all. If the fp32 params get updated,
                # we can't rely on self.optimizer to refresh the fp16 params. We need
                # to handle that manually:
                self._master_params_to_model_params()
            # Our API expects the user to give us ownership of the backward() call by
            # replacing all calls to loss.backward() with optimizer.backward(loss).
            # This requirement holds whether or not the call to backward() is made within a closure.
            # If the user is properly calling optimizer.backward(loss) within "closure,"
            # calling closure() here will give the fp32 master params fresh gradients
            # for the optimizer to play with, so all wrapped_closure needs to do is call
            # closure() and return the loss.
            temp_loss = closure()
            # Retry until a backward pass completes without overflow (the scale
            # is reduced inside backward/update_master_grads on each overflow).
            while(self.overflow):
                print("OVERFLOW within closure! Skipping step, reducing loss scale to {}".format(
                    self.loss_scaler.loss_scale()))
                temp_loss = closure()
            return temp_loss
        retval = self.optimizer.step(wrapped_closure)
        self.first_closure_call_this_step = True
        return retval

    def backward(self, loss, update_master_grads=True, retain_graph=False):
        """
        :attr:`backward` performs the following conceptual steps:
        1. fp32_loss = loss.float() (see first Note below)
        2. scaled_loss = fp32_loss*loss_scale
        3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
        4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
        5. Finally, master grads are divided by loss_scale.
        In this way, after :attr:`backward`, the master params have fresh gradients,
        and :attr:`step` may be called.
        .. note::
            :attr:`backward` internally converts the loss to fp32 before applying the loss scale.
            This provides some additional safety against overflow if the user has supplied an
            fp16 loss value.
            However, for maximum overflow safety, the user should
            compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
            :attr:`backward`.
        .. warning::
            The gradients found in a model's leaves after the call to
            :attr:`backward` should not be regarded as valid in general,
            because it's possible
            they have been scaled (and in the case of dynamic loss scaling,
            the scale factor may change over time).
            If the user wants to inspect gradients after a call to :attr:`backward`,
            only the master gradients should be regarded as valid. These can be retrieved via
            :attr:`inspect_master_grad_data()`.
        Args:
            loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
            update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
            retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
        Example::
            # Ordinary operation:
            optimizer.backward(loss)
            # Naive operation with multiple losses (technically valid, but less efficient):
            # fp32 grads will be correct after the second call, but
            # the first call incurs an unnecessary fp16->fp32 grad copy.
            optimizer.backward(loss1)
            optimizer.backward(loss2)
            # More efficient way to handle multiple losses:
            # The fp16->fp32 grad copy is delayed until fp16 grads from all
            # losses have been accumulated.
            optimizer.backward(loss1, update_master_grads=False)
            optimizer.backward(loss2, update_master_grads=False)
            optimizer.update_master_grads()
        """
        # Convert to fp32 before scaling for extra overflow headroom.
        scaled_loss = loss.float()*self.loss_scaler.loss_scale()
        scaled_loss.backward(retain_graph=retain_graph)
        if update_master_grads:
            self.update_master_grads()

    def update_master_grads(self):
        """
        Copy the ``.grad`` attribute from stored references to fp16 parameters to
        the ``.grad`` attribute of the fp32 master parameters that are directly
        updated by the optimizer. :attr:`update_master_grads` only needs to be called if
        ``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
        """
        self.loss_scaler.clear_overflow_state()
        if len(self.all_fp16_params) > 0:
            # Gather (model grad, master grad) pairs; master grads are allocated
            # lazily the first time they are needed.
            model_grads = []
            master_grads = []
            for model_param, master_param in zip(self.all_fp16_params,
                                                 self.all_fp32_from_fp16_params):
                if model_param.grad is not None:
                    model_grads.append(model_param.grad)
                    if master_param.grad is None:
                        master_param.grad = torch.empty_like(master_param)
                    master_grads.append(master_param.grad)
            self.loss_scaler.unscale(
                model_grads,
                master_grads,
                self.loss_scaler.loss_scale())
        if len(self.all_fp32_from_fp32_params) > 0:
            model_grads = []
            master_grads = []
            # fp32 params act as their own masters, so grads are unscaled in place.
            for model_param, master_param in zip(self.all_fp32_from_fp32_params,
                                                 self.all_fp32_from_fp32_params):
                if model_param.grad is not None:
                    model_grads.append(model_param.grad)
                    master_grads.append(master_param.grad)
            self.loss_scaler.unscale(
                model_grads,
                master_grads,
                self.loss_scaler.loss_scale())
        # update_scale() reports whether this iteration's gradients overflowed.
        self.overflow = self.loss_scaler.update_scale()

    def inspect_master_grad_data(self):
        """
        When running with :class:`FP16_Optimizer`,
        ``.grad`` attributes of a model's fp16 leaves should not be
        regarded as truthful, because they might be scaled.
        After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
        the fp32 master params' ``.grad``
        attributes will contain valid gradients properly divided by the loss scale. However,
        because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
        nonintuitive. :attr:`inspect_master_grad_data`
        allows those gradients to be viewed with shapes corresponding to their associated model leaves.
        Returns:
            List of lists (one list for each parameter group). The list for each parameter group
            is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
        """
        if self.overflow:
            print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
                  "Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
            return None
        else:
            # The optimizer owns only references to master params.
            master_grads_data = []
            for param_group in self.optimizer.param_groups:
                master_grads_this_group = []
                for param in param_group['params']:
                    if param.grad is not None:
                        master_grads_this_group.append(param.grad.data)
                    else:
                        master_grads_this_group.append(None)
                master_grads_data.append(master_grads_this_group)
            return master_grads_data

    # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
    def _get_loss_scale(self):
        return self.loss_scaler.loss_scale()

    def _set_loss_scale(self, value):
        self.loss_scaler._loss_scale = value

    loss_scale = property(_get_loss_scale, _set_loss_scale)

    # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
    def _get_state(self):
        return self.optimizer.state

    def _set_state(self, value):
        self.optimizer.state = value

    state = property(_get_state, _set_state)

    # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
    # (for example, to adjust the learning rate)
    def _get_param_groups(self):
        return self.optimizer.param_groups

    def _set_param_groups(self, value):
        self.optimizer.param_groups = value

    param_groups = property(_get_param_groups, _set_param_groups)
|
apex-master
|
apex/fp16_utils/fp16_optimizer.py
|
import torch
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
    """Convert ``t`` to a Python scalar.

    ``item()`` is a relatively recent torch addition, so objects lacking it are
    treated as one-element sequences for backward compatibility.
    """
    return t.item() if hasattr(t, 'item') else t[0]
class LossScaler:
    """
    Class that manages a static loss scale. This class is intended to interact with
    :class:`FP16_Optimizer`, and should not be directly manipulated by the user.
    Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
    :class:`FP16_Optimizer`'s constructor.
    Args:
        scale (float, optional, default=1.0): The loss scale.
    """
    def __init__(self, scale=1):
        from apex import deprecated_warning
        deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
        self.cur_scale = scale

    # `params` is a list / generator of torch.Variable
    def has_overflow(self, params):
        """A static scale never reports overflow."""
        return False

    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        # Fix: declared as a staticmethod. The original was a plain def missing
        # ``self``, which only worked when accessed through the class object.
        return False

    def update_scale(self, overflow):
        """Static scale: nothing to update."""
        pass

    @property
    def loss_scale(self):
        """The current (constant) loss scale."""
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward-hook helper: multiply each incoming gradient by the loss scale."""
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss, retain_graph=False):
        """Scale ``loss`` and run backward on the scaled value."""
        scaled_loss = loss*self.loss_scale
        scaled_loss.backward(retain_graph=retain_graph)
class DynamicLossScaler:
    """
    Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
    indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
    :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
    operates, because the default options can be changed using the
    the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
    Loss scaling is designed to combat the problem of underflowing gradients encountered at long
    times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
    scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
    encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
    occurred.
    :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
    and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
    If a certain number of iterations occur without overflowing gradients detected,
    :class:`DynamicLossScaler` increases the loss scale once more.
    In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
    always using the highest loss scale possible without incurring overflow.
    Args:
        init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
        scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
        scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
    """
    def __init__(self,
                 init_scale=2**32,
                 scale_factor=2.,
                 scale_window=1000):
        self.cur_scale = init_scale
        self.cur_iter = 0
        # -1 so that the first scale-window check lines up with iteration 0.
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window

    # `params` is a list / generator of torch.Variable
    def has_overflow(self, params):
        """Return True if any parameter's gradient contains inf or NaN."""
        for p in params:
            if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
                return True
        return False

    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        # Fix: declared as a staticmethod. The original was a plain def missing
        # ``self``; it only worked because it was always accessed via the class.
        try:
            # if x is half, the .float() incurs an additional deep copy, but it's necessary if
            # Pytorch's .sum() creates a one-element tensor of the same type as x
            # (which is true for some recent version of pytorch).
            cpu_sum = float(x.float().sum())
            # More efficient version that can be used if .sum() returns a Python scalar
            # cpu_sum = float(x.sum())
        except RuntimeError as instance:
            # We want to check if inst is actually an overflow exception.
            # RuntimeError could come from a different error.
            # If so, we still want the exception to propagate.
            if "value cannot be converted" not in instance.args[0]:
                raise
            return True
        else:
            # inf sums to +/-inf; NaN fails the self-equality test.
            if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
                return True
            return False

    # `overflow` is boolean indicating whether the gradient overflowed
    def update_scale(self, overflow):
        """Shrink the scale on overflow; grow it after ``scale_window`` clean iterations."""
        if overflow:
            # Never let the scale drop below 1.
            self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
            self.last_overflow_iter = self.cur_iter
        else:
            if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
                self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    @property
    def loss_scale(self):
        """The current loss scale."""
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward-hook helper: multiply each incoming gradient by the loss scale."""
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss, retain_graph=False):
        """Scale ``loss`` and run backward on the scaled value."""
        scaled_loss = loss*self.loss_scale
        scaled_loss.backward(retain_graph=retain_graph)
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
has_overflow = DynamicLossScaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
"""
|
apex-master
|
apex/fp16_utils/loss_scaler.py
|
from .multi_tensor_apply import MultiTensorApply
# Shared module-level dispatcher used throughout apex. 2048*32 is the chunk
# size forwarded as the first argument of every fused multi-tensor kernel
# launch (presumably elements per chunk -- see the kernel implementations).
multi_tensor_applier = MultiTensorApply(2048*32)
|
apex-master
|
apex/multi_tensor_apply/__init__.py
|
import torch
class MultiTensorApply(object):
    """Thin dispatcher for apex's fused multi-tensor CUDA kernels (``amp_C``).

    Availability is probed once per construction and recorded on the class, so
    all instances and callers share the same ``available`` flag.
    """
    available = False
    warned = False  # retained for compatibility; not used in this module

    def __init__(self, chunk_size):
        # Fix: record the chunk size unconditionally. The original assigned it
        # inside the try block *after* ``import amp_C``, so a failed import left
        # a partially initialized instance with no ``chunk_size`` attribute.
        self.chunk_size = chunk_size
        try:
            import amp_C  # probe for the compiled extension
            MultiTensorApply.available = True
        except ImportError as err:
            MultiTensorApply.available = False
            # Stash the error so check_avail() can surface the root cause later.
            MultiTensorApply.import_err = err

    def check_avail(self):
        """Raise RuntimeError (with the original import error) if amp_C is missing."""
        if not MultiTensorApply.available:
            raise RuntimeError(
                "Attempted to call MultiTensorApply method, but MultiTensorApply "
                "is not available, possibly because Apex was installed without "
                "--cpp_ext --cuda_ext. Original import error message:",
                MultiTensorApply.import_err)

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        """Invoke fused kernel ``op`` over ``tensor_lists`` in chunks of ``self.chunk_size``."""
        self.check_avail()
        return op(self.chunk_size,
                  noop_flag_buffer,
                  tensor_lists,
                  *args)
|
apex-master
|
apex/multi_tensor_apply/multi_tensor_apply.py
|
apex-master
|
apex/contrib/__init__.py
|
|
import torch
import fused_index_mul_2d
class IndexMul2d_(torch.autograd.Function):
    '''
    Autograd wrapper around the ``fused_index_mul_2d`` CUDA extension.
    Currently only support index in dimension 0 with a 2-dimension tensor.
    The shape of indexed in1 must be same with in2. Now this kernel does not support broadcast.
    The datatype must be float32 or float16.
    '''
    @staticmethod
    def forward(ctx, in1: torch.Tensor, in2: torch.Tensor, idx1: torch.Tensor) -> torch.Tensor:
        # One index per row of in2.
        assert in2.size(0) == idx1.size(0)
        # Both inputs must share a supported floating dtype.
        if ((in1.dtype != torch.float32 and in1.dtype != torch.half) or in2.dtype != in1.dtype):
            raise RuntimeError("input1'dtype and input2's dtype must be fp32 or fp16. And input type must be same")
        if (in1.dim() != 2 or in2.dim() != 2):
            raise RuntimeError("in1 and in2 must be 2-dimension tensor.")
        if (idx1.dim() != 1):
            raise RuntimeError("idx1 must be 1-dimension tensor.")
        # The fused kernel requires contiguous memory; make defensive copies
        # only when necessary.
        if not in1.is_contiguous():
            in1 = in1.contiguous()
        if not in2.is_contiguous():
            in2 = in2.contiguous()
        if not idx1.is_contiguous():
            idx1 = idx1.contiguous()
        assert in1.is_contiguous()
        assert in2.is_contiguous()
        assert idx1.is_contiguous()
        # Output matches in2's shape; the kernel fills it in place.
        out = torch.empty_like(in2)
        if (in1.dtype == torch.float32):
            fused_index_mul_2d.float_forward(
                out,
                in1,
                in2,
                idx1)
        elif (in1.dtype == torch.half):
            fused_index_mul_2d.half_forward(
                out,
                in1,
                in2,
                idx1)
        # Save the (possibly contiguous copies of the) inputs for backward.
        ctx.for_backwards = (in1, in2, idx1)
        return out
    @staticmethod
    def backward(ctx, grad_out):
        # Delegates to the double-backward-capable Function below so that
        # second-order gradients remain available.
        in1, in2, idx1 = ctx.for_backwards
        grad_in1, grad_in2 = index_mul_2d_backward(in1, in2, idx1, grad_out)
        # No gradient flows to the integer index tensor.
        return grad_in1, grad_in2, None
class IndexMul2dBackward_(torch.autograd.Function):
    """Backward pass of :class:`IndexMul2d_`, expressed as its own autograd
    Function so the fused ``*_backward_backward`` kernels can provide
    second-order gradients. Invoked from ``IndexMul2d_.backward``."""
    @staticmethod
    def forward(ctx, in1: torch.Tensor, in2: torch.Tensor, idx1: torch.Tensor,
                grad_out: torch.Tensor) -> "tuple[torch.Tensor, torch.Tensor]":
        # The fused kernels require contiguous inputs.
        if not in1.is_contiguous():
            in1 = in1.contiguous()
        if not in2.is_contiguous():
            in2 = in2.contiguous()
        if not idx1.is_contiguous():
            idx1 = idx1.contiguous()
        if not grad_out.is_contiguous():
            grad_out = grad_out.contiguous()
        assert in1.is_contiguous()
        assert in2.is_contiguous()
        assert idx1.is_contiguous()
        assert grad_out.is_contiguous()
        # grad_in1 is zero-initialized -- presumably accumulated via scatter-add
        # over idx1 inside the kernel (TODO confirm); grad_in2 is fully
        # overwritten, so uninitialized storage suffices.
        grad_in1 = torch.zeros_like(in1)
        grad_in2 = torch.empty_like(in2)
        if (in1.dtype == torch.float32):
            fused_index_mul_2d.float_backward(
                grad_in1,
                grad_in2,
                grad_out,
                in1,
                in2,
                idx1)
        elif (in1.dtype == torch.half):
            fused_index_mul_2d.half_backward(
                grad_in1,
                grad_in2,
                grad_out,
                in1,
                in2,
                idx1)
        # Save inputs (plus the incoming gradient) for the double-backward pass.
        ctx.for_backwards = (in1, in2, idx1, grad_out)
        return grad_in1, grad_in2
    @staticmethod
    def backward(ctx, grad_grad_in1, grad_grad_in2):
        # Double backward: gradients of (grad_in1, grad_in2) w.r.t. the original
        # inputs and w.r.t. grad_out, computed by the fused kernels.
        if not grad_grad_in1.is_contiguous():
            grad_grad_in1 = grad_grad_in1.contiguous()
        if not grad_grad_in2.is_contiguous():
            grad_grad_in2 = grad_grad_in2.contiguous()
        assert grad_grad_in1.is_contiguous()
        assert grad_grad_in2.is_contiguous()
        in1, in2, idx1, grad_out = ctx.for_backwards
        grad_in1 = torch.zeros_like(in1)
        grad_in2 = torch.empty_like(in2)
        grad_grad_out = torch.empty_like(grad_out)
        if (in1.dtype == torch.float32):
            fused_index_mul_2d.float_backward_backward(
                grad_grad_out,
                grad_in1,
                grad_in2,
                grad_out,
                grad_grad_in1,
                grad_grad_in2,
                in1,
                in2,
                idx1)
        elif (in1.dtype == torch.half):
            fused_index_mul_2d.half_backward_backward(
                grad_grad_out,
                grad_in1,
                grad_in2,
                grad_out,
                grad_grad_in1,
                grad_grad_in2,
                in1,
                in2,
                idx1)
        # Order mirrors forward's inputs: (in1, in2, idx1, grad_out); the index
        # tensor gets no gradient.
        return grad_in1, grad_in2, None, grad_grad_out
# Public functional entry points. `index_mul_2d` is the user-facing op;
# `index_mul_2d_backward` is its differentiable backward, called from
# IndexMul2d_.backward so double backward works.
index_mul_2d = IndexMul2d_.apply
index_mul_2d_backward = IndexMul2dBackward_.apply
|
apex-master
|
apex/contrib/index_mul_2d/index_mul_2d.py
|
from .index_mul_2d import index_mul_2d
|
apex-master
|
apex/contrib/index_mul_2d/__init__.py
|
from .sparse_masklib import create_mask
from .asp import ASP
|
apex-master
|
apex/contrib/sparsity/__init__.py
|
import types
import torch
from .sparse_masklib import create_mask
from .permutation_lib import Permutation
torchvision_imported=True
try:
import torchvision
except ImportError:
print("[ASP][Warning] torchvision cannot be imported.")
torchvision_imported=False
import json
import os
import string
import time
def eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_layer_names):
    """Return the (name, module) pairs of *model* that may be sparsified.

    A module qualifies when its type is in *whitelist_layer_types*, its name
    is not in *disallowed_layer_names*, and -- if *allowed_layer_names* is
    given -- its name appears in that allow-list.
    """
    chosen = []
    for mod_name, mod in model.named_modules():
        if not isinstance(mod, whitelist_layer_types):
            continue
        if mod_name in disallowed_layer_names:
            continue
        if allowed_layer_names is not None and mod_name not in allowed_layer_names:
            continue
        chosen.append((mod_name, mod))
    return chosen
class ASP:
    """Automatic SParsity (ASP).

    Singleton helper, used exclusively through classmethods, that:
      1. augments a model's prunable parameters with 2:4 sparsity mask buffers,
      2. monkey-patches the optimizer so masks are re-applied every step,
      3. optionally permutes input channels (via ``Permutation``) to retain
         more weight magnitude before the masks are computed.
    """
    # Singleton state shared by all classmethods.
    __model = None
    __verbosity = 0
    __optimizer = None
    __sparse_parameters = []        # (module_name, module, p_name, p, mask, pruned) tuples
    __calculate_mask = None
    __allow_permutation = True
    __all_parameters = []           # (module_name, module, p_name, p) tuples for permutation search
    __save_permutation_graph = False
    __permutation_output_dir = ''

    @classmethod
    def init_model_for_pruning(cls, model, mask_calculator="m4n2_1d",
                               verbosity=3,
                               whitelist=[torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.MultiheadAttention],
                               allowed_layer_names=None, disallowed_layer_names=[],
                               allow_recompute_mask=False, custom_layer_dict={},
                               allow_permutation=True):
        """Call this method to modify your model to take advantage of sparse matrix multiplication.
        Note that this call alone only augments the model with additional buffers needed for sparse MMA,
        it does not enable use of sparse MMA.

        If you are starting with a fresh model:

          model = ...
          ASP.init_model_for_pruning(model, mask_calculator, ...)
          if (training) ASP.init_optimizer_for_pruning(optimizer)
          ASP.compute_sparse_masks() // sparsity is off by default, call when you want to enable it.

        If you are starting from a checkpoint:

          model = ...
          ASP.init_model_for_pruning(model, mask_calculator, ...)
          torch.load(...)
          if (training) ASP.init_optimizer_for_pruning(optimizer)

        Arguments:
          model                    The model
          mask_calculator          Either callable that computes mask given a tensor OR pattern string for sparse mask lib.
          verbosity                Integer controlling verbosity level.
                                   0 -> Only errors.
                                   1 -> Errors and warnings.
                                   2 -> Errors, warnings and info.
                                   3 -> Errors, warnings, info and debug.
          whitelist                Module types approved for sparsity.
          allowed_layer_names      If not None, only layer names that appear in this list are considered for sparsity.
          disallowed_layer_names   If not [], only layer names that do not appear in this list are considered for sparsity.
          allow_recompute_mask     If True, stores pruned values so that dense weights can be restored.
                                   Pruned weights are stored in CPU memory, hence this option does not increase GPU memory usage.
          custom_layer_dict        Dictionary of additional layer parameters to sparsify. e.g. {CustomLinear: ['weight']}
          allow_permutation        If True, allow the input channel permutation to ease the influence of weight pruning.

        [Future] Support for allow_recompute_mask can be removed, it is not part of sparse inference recipe.
        """
        assert (cls.__model is None), "ASP has been initialized already."
        cls.__model = model
        cls.__verbosity = verbosity
        cls.__allow_permutation = allow_permutation

        if isinstance(mask_calculator, str):
            def create_mask_from_pattern(param):
                return create_mask(param, mask_calculator).bool()
            cls.__calculate_mask = create_mask_from_pattern
        else:
            cls.__calculate_mask = mask_calculator  # user defined function

        # Build the table of {module type: prunable parameter names}.
        # torchvision needs version-specific handling: torchvision.ops.misc.Conv2d
        # was removed in 0.12 (deprecated APIs cleanup, torchvision #5386).
        if torchvision_imported:
            print("[ASP] torchvision is imported, can work with the MaskRCNN/KeypointRCNN from torchvision.")
            torchvision_version = str(torchvision.__version__)
            torchvision_version_major = int(torchvision_version.split('.')[0])
            torchvision_version_minor = int(torchvision_version.split('.')[1])
            if torchvision_version_major == 0 and torchvision_version_minor < 12:
                sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'], torchvision.ops.misc.Conv2d: ['weight']}
            else:  # Torchvision remove APIs that were deprecated before 0.8 (#5386) in 0.12.0, torchvision.ops.misc.Conv2d is removed
                sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']}
        else:
            sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torch.nn.modules.linear.NonDynamicallyQuantizableLinear: ['weight'], torch.nn.MultiheadAttention: ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']}

        if custom_layer_dict:  # Update default list to include user supplied custom (layer type : parameter tensor), make sure this tensor type is something ASP knows how to prune
            sparse_parameter_list.update(custom_layer_dict)
            # Fix: rebind instead of ``whitelist += ...`` so the mutable default
            # argument is never extended in place across calls.
            whitelist = whitelist + list(custom_layer_dict.keys())

        for module_type in whitelist:
            # Fix: the message previously interpolated undefined ``module.dtype()``,
            # which raised NameError instead of printing the failing type.
            assert (module_type in sparse_parameter_list), "Module %s :: Don't know how to sparsify module." % module_type

        # find all sparse modules, extract sparse parameters and decorate
        def add_sparse_attributes(module_name, module):
            """Register mask (and optional pruned-weight) buffers for every prunable parameter of *module*."""
            sparse_parameters = sparse_parameter_list[type(module)]
            for p_name, p in module.named_parameters():
                if p_name in sparse_parameters and p.requires_grad:
                    # check for NVIDIA's TC compatibility: we check along the horizontal direction
                    if p.dtype == torch.float32 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0):  # User defines FP32 and APEX internally uses FP16 math
                        print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
                        continue
                    if p.dtype == torch.float16 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0):  # For Conv2d dim= K x CRS; we prune along C
                        print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
                        continue
                    if cls.__verbosity >= 3:
                        print("[ASP] Sparsifying %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
                    mask = torch.ones_like(p).bool()
                    buffname = p_name.split(".")[-1]  # buffer names cannot contain "."
                    module.register_buffer('__%s_mma_mask' % buffname, mask)
                    if allow_recompute_mask:
                        # Pruned values live on CPU so GPU memory usage does not grow.
                        pruned = torch.zeros_like(p).cpu()
                        module.register_buffer('__%s_mma_pruned_p' % buffname, pruned)
                    else:
                        pruned = None
                    cls.__sparse_parameters.append((module_name, module, p_name, p, mask, pruned))
                else:
                    if cls.__verbosity >= 3:
                        print("[ASP] Not sparsifying %s::%s of size=%s and type=%s" % (module_name, p_name, str(p.size()), str(p.dtype)))

        for name, sparse_module in eligible_modules(model, tuple(whitelist), allowed_layer_names, disallowed_layer_names):
            add_sparse_attributes(name, sparse_module)

        if allow_permutation:  # find all named modules, extract parameters and decorate, used for offline permutation in K dim
            for module_name, module in model.named_modules():
                module_type_str = str(type(module)).split("\'")[1]
                if module_type_str == 'torch.nn.modules.container.Sequential' or module_type_str.startswith('torchvision.models'):
                    # filter out the 'torch.nn.modules.container.Sequential' type and the whole model, like 'torchvision.models.vgg.VGG'
                    continue
                for p_name, p in module.named_parameters():
                    cls.__all_parameters.append((module_name, module, p_name, p))
                if module_type_str == 'torch.nn.modules.batchnorm.BatchNorm2d':
                    # need to get the running_mean and running_var from model.state_dict(), as they are not the learnable parameters
                    module_mean_name = module_name + '.running_mean'
                    module_var_name = module_name + '.running_var'
                    for param_key in model.state_dict():
                        if module_mean_name == param_key or module_var_name == param_key:
                            cls.__all_parameters.append((module_name, module, param_key.split(".")[-1], model.state_dict()[param_key]))
            # add the __permutation_output_dir field to save the intermediate results for permutation
            cls.__permutation_output_dir = '.'
            # Set the corresponding params from ASP class to the Permutation class
            permutation_verbosity = 5
            Permutation.set_permutation_params_from_asp(cls.__model, cls.__sparse_parameters, cls.__all_parameters, permutation_verbosity)
            # Set the identical random seed for all GPUs to make sure the same results generated in permutation search
            Permutation.set_identical_seed()

    @classmethod
    def already_init_asp_model(cls):
        """Call this method to check whether ASP has been initialized already.
        """
        if cls.__model is None:
            if cls.__verbosity >= 3:
                print("[ASP] ASP has not been initialized.")
            return False
        else:
            if cls.__verbosity >= 3:
                print("[ASP] ASP has been initialized already.")
            return True

    @classmethod
    def init_optimizer_for_pruning(cls, optimizer):
        """Call this method to monkey patch optimizer step function so that masks can be applied to
        gradients and weights during training.
        You must call init_model_for_pruning(...) before calling init_optimizer_for_pruning(...)
        """
        assert (cls.__optimizer is None), "ASP has initialized optimizer already."
        assert (cls.__calculate_mask is not None), "Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning."

        # store pointer to original optimizer step method
        # (``__step`` name-mangles to ``_ASP__step`` both here and inside the
        # closure below, so the stashed original is found again at call time)
        cls.__optimizer = optimizer
        cls.__optimizer.__step = optimizer.step

        def __step(opt_self, *args, **kwargs):
            # prune gradients before step method
            with torch.no_grad():
                for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                    if p.grad is not None:  # thx pjudd
                        p.grad.mul_(mask)
            # call original optimizer step method
            rval = opt_self.__step(*args, **kwargs)
            # prune parameters after step method
            with torch.no_grad():
                for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                    p.mul_(mask)
            return rval
        cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)

    @classmethod
    def compute_sparse_masks(cls):
        """Call this method to enable sparsity.
        If init(...) was called with allow_recompute_mask=False AND sparsity is disabled, pruned field can be None.
        """
        with torch.no_grad():
            if cls.__allow_permutation:
                # Step 1: use the Torch.FX library to build the graph
                # Step 2: permutation search with the customized kernel
                # The simplest without user intervention:
                # A. try to import with the distributed mode of the original model
                # B. if meet the error, import with the none-distributed mode of the original model
                start_time_permute = time.perf_counter()
                successful_permutation = False
                try:
                    # DDP-wrapped models hold the real model in ``.module``.
                    successful_permutation = Permutation.permute_model(cls.__model.module, dump_fx_graph=cls.__save_permutation_graph, save_dumped_fx_graph=os.path.join(cls.__permutation_output_dir, 'model_offline_permutation_graph.json'))
                    if successful_permutation:
                        print("\n[compute_sparse_masks] permuted the (distributed) model.")
                except AttributeError:
                    successful_permutation = Permutation.permute_model(cls.__model, dump_fx_graph=cls.__save_permutation_graph, save_dumped_fx_graph=os.path.join(cls.__permutation_output_dir, 'model_offline_permutation_graph.json'))
                    if successful_permutation:
                        print("\n[compute_sparse_masks] permuted the model.")
                if successful_permutation:
                    duration_build_offline_permutation_graph = time.perf_counter() - start_time_permute
                    print("[compute_sparse_masks] Take {:.4f} seconds to find and apply permutations.".format(duration_build_offline_permutation_graph))

            for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                if mask.sum() < mask.numel():  # when recalculating masks
                    # restore dense parameter if allow_recompute_mask is enabled
                    assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
                    p.add_(pruned.cuda())
                mask.set_(cls.__calculate_mask(p))
                if pruned is not None:  # stow away pruned weights to cpu
                    pruned.set_((p * (~mask)).cpu())
                p.mul_(mask)  # in-place multiplication, so pruned weights are 0-values, hence checkpoint will have 0s for pruned weights
                if cls.__verbosity >= 2:
                    print("[ASP] Enabled %.2f%% sparsity for %s::%s of size=%s and type=%s with magnitude %s" % (100.0-100.0*mask.sum()/mask.numel(), module_name, p_name, str(p.size()), str(p.dtype), torch.sum(torch.abs(p))))

    @classmethod
    def restore_pruned_weights(cls):
        """Call this method to disable sparsity and restore all weights.
        This will only work if init(...) was called with allow_recompute=True.
        """
        with torch.no_grad():
            for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                if mask.sum() < mask.numel():
                    assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
                    p.add_(pruned.cuda())
                    mask.fill_(1)
                    pruned.zero_()
                    if cls.__verbosity >= 2:
                        print("[ASP] Disabled sparsity for %s::%s (dense weights restored)" % (module_name, p_name))

    @classmethod
    def is_sparsity_enabled(cls):
        """Call this method to determine if sparsity is enabled in the model.
        The typical use case is right after checkpoint has been loaded.
        """
        total, sp100, sp50 = 0, 0, 0
        for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
            total += 1
            mask_sum = mask.sum()
            mask_numel = mask.numel()
            if mask_sum == mask_numel:
                sp100 += 1
            elif mask_sum*2 == mask_numel:
                sp50 += 1
        # A model must be uniformly dense (all 100%) or uniformly 2:4 (all 50%).
        assert (total == sp100 or total == sp50), "Inconsistent model sparsity"
        if total == sp100:
            return False
        elif total == sp50:
            return True

    @classmethod
    def prune_trained_model(cls, model, optimizer):
        """One-shot convenience wrapper for pruning an already-trained model."""
        # add mask buffers to model (init_model_for_pruning), augment optimizer (init_optimizer_for_pruning) and compute masks (compute_sparse_masks)
        cls.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=2, whitelist=[torch.nn.Linear, torch.nn.Conv2d, torch.nn.MultiheadAttention], allow_recompute_mask=False)
        cls.init_optimizer_for_pruning(optimizer)
        cls.compute_sparse_masks()

    @classmethod
    def set_permutation_saving_params(cls, allow_permutation=True, save_permutation_graph=False, permutation_output_dir='.'):
        """This function is used to set the permutation saving related parameters in ASP class and inside of the Permutation class."""
        print("\n[ASP][set_permutation_saving_param] Set permutation saving related parameters")
        print("\n[set_permutation_saving_param] Set permutation saving related parameters")
        cls.__allow_permutation = allow_permutation
        print("[set_permutation_saving_param]\t Allow permutation: {}".format(cls.__allow_permutation))
        cls.__save_permutation_graph = save_permutation_graph
        print("[set_permutation_saving_param]\t Save permutation graphs: {}".format(cls.__save_permutation_graph))
        cls.__permutation_output_dir = permutation_output_dir
        print("[set_permutation_saving_param]\t Permutation graphs saving dir: {}".format(cls.__permutation_output_dir))
        Permutation.set_permutation_saving_params(allow_permutation, save_permutation_graph, permutation_output_dir)
|
apex-master
|
apex/contrib/sparsity/asp.py
|
import os
import torch
import json
import string
import time
import numpy as np
import sys
import builtins as __builtin__
import io
try:
from .permutation_search_kernels import accelerated_search_for_good_permutation, sum_after_2_to_4
print("[ASP][Info] permutation_search_kernels can be imported.")
except ImportError:
print("[ASP][Warning] permutation_search_kernels cannot be imported.")
print("[ASP][Warning] If you want to accelerate the permutation search process by GPU, please build APEX by following the instructions at https://github.com/NVIDIA/apex/blob/master/apex/contrib/sparsity/README.md")
def convert_fx_node_name(fx_node_name):
    """Standardize punctuation of a node's name: replace all '_' with '.'"""
    return '.'.join(fx_node_name.split('_'))
def get_node_parent_children(fx_node):
    """Populate lists of all direct parents and children of a node.

    Returns a pair of lists holding the '.'-normalized names of the node's
    direct inputs (parents) and direct users (children), in graph order.
    """
    parent_names = [convert_fx_node_name(producer.name)
                    for producer in fx_node.all_input_nodes]
    # fx_node.users is a dict keyed by the consuming nodes.
    child_names = [convert_fx_node_name(consumer.name)
                   for consumer in fx_node.users]
    return parent_names, child_names
def node_name_matches(node_name, module_name):
    """Check for a match between graph node name and stored module name, accounting for formatting and DDP training differences"""
    # canonical form: strip all punctuation, lowercase everything
    strip_punct = str.maketrans('', '', string.punctuation)
    canonical_node = node_name.translate(strip_punct).lower()
    canonical_module = module_name.translate(strip_punct).lower()
    if node_name == module_name or canonical_node == canonical_module:
        return True
    # module names start with 'module.' in distributed data-parallel training,
    # but fx graph node names don't; check for both
    if 'module.' + node_name == module_name:
        return True
    return 'module' + canonical_node == canonical_module
def replicate_sequence(sequence, replications):
    """Replicate a permutation to apply it to an even multiple of channel counts"""
    block = len(sequence)
    # copy `replications` times, offsetting each copy by one block width
    return [channel + rep * block
            for rep in range(replications)
            for channel in sequence]
class Permutation:
    """Offline channel-permutation search and application for 2:4 sparsity.

    Works hand in hand with the ASP class: ASP hands over the model and its
    parameter lists, then ``permute_model`` builds an FX graph, searches for
    good permutations and applies them along the C/K dimensions.
    """
    # Singleton state handed over from ASP (see set_permutation_params_from_asp).
    __model = None
    __sparse_parameters = []
    __allow_permutation = False
    __all_parameters = []
    __verbosity = 0 ## 0: errors only, 1: also high-level details, warnings, 2: also intermediate steps, 3: everything
    # Bookkeeping of applied/skipped permutations (see get_permutation_stats).
    __params_permuted_in_C = []
    __params_permuted_in_K = []
    __unpermuted_dims = []
    __save_permutation_graph = False
    __permutation_output_dir = ''
    # Seed shared by all DDP ranks so every rank finds identical permutations.
    __manual_seed = None
    __tcpstore_port = 2341
    # these module types may be the target of permutations (have potentially sparse weights or are attributes with no parents)
    __permutation_target_module_types = ['torch.nn.modules.conv.Conv1d',
                                         'torch.nn.modules.conv.Conv2d',
                                         'torch.nn.modules.linear.Linear',
                                         'torch.nn.modules.linear.LazyLinear',
                                         'torch.nn.modules.linear.NonDynamicallyQuantizableLinear',
                                         'torch.nn.modules.activation.MultiheadAttention',
                                         'get_attr']
    # these module types are not permuted, but must pass any permutation seen by a child's C or passed-thru K to the parents' K
    __simple_passthru_module_types = ['torch.nn.modules.activation.ReLU6',
                                      'torch.nn.modules.activation.ReLU',
                                      'torch.nn.modules.dropout.Dropout',
                                      'torch.nn.modules.dropout.Dropout1d',
                                      'torch.nn.modules.dropout.Dropout2d',
                                      'torch.nn.modules.dropout.Dropout3d',
                                      'torch.nn.modules.dropout.AlphaDropout',
                                      'torch.nn.modules.dropout.FeatureAlphaDropout',
                                      'torch.nn.modules.pooling.MaxPool2d',
                                      'torch.nn.modules.pooling.AdaptiveAvgPool2d',
                                      'torch.nn.modules.pooling.AvgPool2d',
                                      'torch.nn.modules.activation.Hardsigmoid',
                                      'torch.nn.modules.activation.Hardswish',
                                      'torch.nn.modules.activation.GELU',
                                      'torch.nn.modules.normalization.LocalResponseNorm',
                                      'torch.nn.modules.activation.Softmin',
                                      'torch.nn.modules.activation.Softmax',
                                      'torch.nn.modules.activation.Softmax2d',
                                      'torch.nn.modules.activation.LogSoftmax',
                                      'torch.nn.modules.activation.AdaptiveLogSoftmaxWithLoss',
                                      'torch.nn.modules.activation.SiLU',
                                      'torch.nn.modules.activation.Sigmoid',
                                      'concat',
                                      'torch.nn.modules.flatten.Flatten' # if it's a problem, it'll be handled via dimension mismatch check
                                      ]
    # these module types have parameters that must be permuted along K as well as need to pass the permutation thru to parents' K
    __permute_K_and_passthru_module_types = ['torch.nn.modules.batchnorm.BatchNorm2d',
                                             'torch.nn.modules.normalization.LayerNorm',
                                             'torch.nn.modules.instancenorm.InstanceNorm2d',
                                             'torch.nn.modules.batchnorm.SyncBatchNorm']
    # these module types cannot be permuted safely (today), and cause neighboring layers to have permutations disabled
    __disallow_permutations_module_types = ['torch.nn.modules.normalization.GroupNorm',   # to handle: influence GCD of real children's sibling group
                                            'torch.nn.modules.linear.Bilinear',           # need to permute one input along in1_features and the other along in2_features
                                            'torch.nn.modules.activation.GLU',            # may work OOTB, but might need to explicitly handle dimsionality change
                                            ]
    @classmethod
    def set_identical_seed(cls, identical_seed=1):
        """Make all GPUs in DDP use the same seed to find identical permutations and not require syncing parameters later"""
        if cls.__verbosity > 0:
            print("[set_identical_seed] Set the identical seed: {:} for all GPUs to make sure the same results generated in permutation search".format(identical_seed))
        # Remember the seed so reset_seed() can re-apply it before every search.
        cls.__manual_seed = identical_seed
        cls.reset_seed()
@classmethod
def reset_seed(cls):
"""To find the same permutations no matter how many GPUs are used, we reset the seed before every search"""
identical_seed = cls.__manual_seed
assert identical_seed is not None, "Must call set_identical_seed() before it can be reset"
torch.manual_seed(identical_seed)
torch.cuda.manual_seed(identical_seed)
import random
np.random.seed(identical_seed)
random.seed(identical_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
    @classmethod
    def set_tcpstore_port(cls, tcpstore_port):
        """Override the default port if it is in use in a distributed training session"""
        cls.__tcpstore_port = tcpstore_port
        if cls.__verbosity > 0:
            print(f"[set_tcpstore_port] TCPStore port set to {cls.__tcpstore_port} .")
@classmethod
def set_permutation_saving_params(cls, allow_permutation=False, save_permutation_graph=False, permutation_output_dir='.'):
"""This function is used to set the permutation saving related parameters."""
cls.__allow_permutation = allow_permutation
cls.__save_permutation_graph = save_permutation_graph
cls.__permutation_output_dir = permutation_output_dir
if cls.__verbosity > 0:
print(f"[permutation_lib][set_permutation_saving_param] Set permutation saving related parameters\n\tAllow permutation: {cls.__alow_permutation}\n\tSave permutation graphs: {cls.__save_permutation_graph}\n\tPermutation graphs saving dir: {cls.__permutation_output_dir}")
@classmethod
def set_permutation_params_from_asp(cls, model, sparse_parameters, all_parameters, verbosity):
"""This function is used to set the permutation needed parameters from ASP class."""
cls.__verbosity = verbosity
if cls.__verbosity > 0:
print("[set_permutation_params_from_asp] Set permutation needed parameters")
cls.__model = model
cls.__sparse_parameters = sparse_parameters
cls.__all_parameters = all_parameters
if cls.__verbosity > 1:
sparse_param_names = [module_name+":"+p_name for (module_name, module, p_name, p, mask, pruned) in cls.__sparse_parameters]
all_param_names = [module_name+":"+p_name for (module_name, module, p_name, p) in cls.__all_parameters]
print(f"\tSparse parameter names: {sparse_param_names}\n\tAll parameter names: {all_param_names}")
cls.__params_permuted_in_C = []
cls.__params_permuted_in_K = []
cls.__unpermuted_dims = []
    @classmethod
    def permute_model(cls, model, dump_fx_graph=False, save_dumped_fx_graph='./model_permutation_graph.json'):
        """Permute a model's weights in order to maintain more magnitude after enforcing the sparsity constraint.

        Pipeline: trace the model with Torch.FX, annotate the graph
        (parents/children, sibling groups, dimension agreement, flag
        propagation), search for permutations, sync them across DDP ranks,
        then apply them in place.

        Returns True if the FX graph could be built (and the pipeline ran),
        False otherwise.
        """
        if cls.__verbosity > 0:
            print("\n[permute_model] Permuting the model")
        # extract the output_dir, so all the intermediate fx_graph can be saved under that path
        extract_output_dir=os.path.split(save_dumped_fx_graph)[0]
        cls.__permutation_output_dir = extract_output_dir
        fx_graph, success_in_build_fx_graph = cls.build_fx_graph(model, dump_fx_graph=dump_fx_graph, save_dumped_fx_graph=save_dumped_fx_graph)

        if success_in_build_fx_graph:
            # Each stage consumes the previous stage's annotated graph.
            fx_graph_after_init_flags = cls.init_permutation_flags(fx_graph)
            fx_graph_after_find_real_parents = cls.find_real_parents(fx_graph_after_init_flags)
            fx_graph_after_find_real_children = cls.find_real_children(fx_graph_after_find_real_parents)
            fx_graph_after_making_groups = cls.make_sibling_coparent_groups(fx_graph_after_find_real_children)
            fx_graph_after_fixup_concats = cls.fixup_concats(fx_graph_after_making_groups)
            fx_graph_after_enforce_dimension_agreement = cls.enforce_dimension_agreement(fx_graph_after_fixup_concats)
            fx_graph_after_propagate_flags = cls.propagate_permutation_flags(fx_graph_after_enforce_dimension_agreement)

            start_time_search_for_good_permutation = time.perf_counter()
            fx_graph_after_find_permutations = cls.find_permutations(fx_graph_after_propagate_flags)

            # In DDP every rank runs the (identically seeded) search; wait for
            # all ranks before continuing.
            if torch.distributed.is_initialized():
                if cls.__verbosity > 0:
                    duration_search_for_good_permutation = time.perf_counter() - start_time_search_for_good_permutation
                    # NOTE(review): builtins print() takes no ``force`` kwarg; this
                    # relies on a module-level print override (this file imports
                    # ``builtins as __builtin__``) -- confirm the override exists.
                    print(f"[permute_model] Rank {torch.distributed.get_rank()} completed search in {duration_search_for_good_permutation:.2f}s, waiting for others.", force=True)
                torch.distributed.barrier()

            duration_search_for_good_permutation = time.perf_counter() - start_time_search_for_good_permutation
            if cls.__verbosity > 0:
                print("\n[permute_model] Take {:.4f} seconds to finish search_for_good_permutation function.".format(duration_search_for_good_permutation))

            fx_graph_after_sync_permutations = cls.sync_permutations(fx_graph_after_find_permutations)
            fx_graph_after_apply_permutations = cls.apply_permutations(fx_graph_after_sync_permutations)
            cls.check_graph_for_unpermuted_nodes(fx_graph_after_apply_permutations)
            fx_graph = fx_graph_after_apply_permutations

        if cls.__save_permutation_graph:
            cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_permutation_graph.json')) # save the intermediate graph as JSON file for debugging

        return success_in_build_fx_graph
    @classmethod
    def get_permutation_stats(cls):
        """Return statistics for how many permutations were applied in various dimensions, used for testing"""
        # (params permuted along C, params permuted along K, dims left unpermuted)
        return cls.__params_permuted_in_C, cls.__params_permuted_in_K, cls.__unpermuted_dims
@classmethod
def apply_permutation_in_C_dim(cls, node_name, permutation_sequence, dryrun):
"""This function is used to permutation for a node in C dim. (Only need to handle the weight of the node) """
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_C_dim] Permutation for node: \'{:}\' in C dim".format(node_name))
if len(permutation_sequence) == 0:
if cls.__verbosity >= 0:
print(f"ERROR: [apply_permutation_in_C_dim] the permutation sequence for node {node_name} is empty, fail to apply permutation in C dim.")
return False
is_node_in_sparse_parameters = False
success_permutation = False
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if node_name_matches(node_name, module_name):
if cls.__verbosity > 2 and dryrun:
print("[apply_permutation_in_C_dim] find the node: \'{:}\' \'{:}\' in cls.__sparse_parameters, succeed to apply permutation in C dim.".format(node_name, p_name))
is_node_in_sparse_parameters = True
permutation_to_apply = permutation_sequence
if p.shape[1] != len(permutation_sequence): # assumed to be grouped convolutions or concatenated weights
if p.shape[1] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p.shape[1] // len(permutation_sequence))
if not dryrun:
p.data.copy_(p[:, permutation_to_apply, ...])
cls.__params_permuted_in_C.append(node_name + "." + p_name)
success_permutation = True
if not is_node_in_sparse_parameters:
# A special case: if the node itself not in sparse_module_names but one of its real_siblings in sparse_module_names, then the node will not do the permutation search, but it may need to apply the offline permutation in C dim according to the searched permutation sequence from its real_siblings in sparse_module_names
try:
for module_name_from_all_parameters, module_from_all_parameters, p_name_from_all_parameters, p_from_all_parameters in cls.__all_parameters:
if node_name_matches(node_name, module_name_from_all_parameters) and p_name_from_all_parameters == "weight":
if cls.__verbosity > 3 and dryrun:
print("[apply_permutation_in_C_dim] cannot find the node: \'{:}\' \'{:}\' in cls.__sparse_parameters, but can find in cls.__all_parameters.".format(node_name, p_name_from_all_parameters))
permutation_to_apply = permutation_sequence
if p_from_all_parameters.shape[1] != len(permutation_sequence): # assumed to be grouped convolutions
if p_from_all_parameters.shpae[1] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p_from_all_parameters.shape[1] // len(permutation_sequence))
if not dryrun:
p_from_all_parameters.data.copy_(p_from_all_parameters[:, permutation_to_apply, ...])
cls.__params_permuted_in_C.append(node_name + "." + p_name_from_all_parameters)
success_permutation = True
if cls.__verbosity > 2 and dryrun:
print("[apply_permutation_in_C_dim] cannot find the node: \'{:}\' in cls.__sparse_parameters, after trying with cls.__all_parameters, succeed to apply permutation in C dim.".format(node_name))
except:
success_permutation = False
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in_C_dim] cannot find the node: \'{:}\' in cls.__sparse_parameters, after trying with cls.__all_parameters, still fail to apply permutation in C dim.".format(node_name))
return success_permutation
    @classmethod
    def permute_attr(cls, node_name, permutation_sequence, fx_graph, dryrun):
        """ Permute a node's attributes. Somewhat hacky, assumes that we'll find exactly one dimension with a length matching the permutation's """
        assert 'attr' in fx_graph[node_name].keys()
        attr = fx_graph[node_name]['attr']
        if cls.__verbosity > 1:
            print(f"Found attribute {node_name} of shape {attr.shape}")
        found_perm = False
        for dim in range(len(attr.shape)):
            # Heuristic: a dimension is "the" permutable one iff its length
            # equals the permutation length; a second match is ambiguous.
            if attr.shape[dim] == len(permutation_sequence):
                if found_perm:
                    if cls.__verbosity > 0:
                        print(f"\tWARNING: {node_name} has already been permuted, but it's trying to happen again along another dimension {dim}.")
                    return False
                found_perm = True
                if cls.__verbosity > 1 and dryrun:
                    print(f"\tpermuting along dimension {dim}")
                if not dryrun:
                    # permute the dimension of interest to the front, permute within that dimension, then reset it
                    order = [c for c in range(len(attr.shape))]
                    order[0] = dim
                    order[dim] = 0
                    prmt = tuple(order)
                    # Work on a clone; the swap (0, dim) transpose is its own
                    # inverse, so applying it twice restores the layout.
                    temp_weight = torch.clone(attr)
                    temp_weight = torch.permute(temp_weight, prmt)
                    temp_weight.copy_(temp_weight[permutation_sequence, ...])
                    temp_weight = torch.permute(temp_weight, prmt)
                    attr.data.copy_(temp_weight)
                    cls.__params_permuted_in_K.append(node_name + "_" + str(dim))
        return found_perm
@classmethod
def apply_permutation_in_K_dim(cls, node_name, permutation_sequence, fx_graph, dryrun):
"""This function is used to permutation for a node in K dim. (Need to handle the weight/bias/running_mean/running_var of the node)"""
if cls.__verbosity > 1:
print("[apply_permutation_in_K_dim] Permutation for node: \'{:}\' in K dim".format(node_name))
if len(permutation_sequence) == 0:
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in_K_dim] the permutation sequence is empty, fail to apply permutation in K dim.")
return False
# permute attribute nodes
if 'attr' in fx_graph[node_name].keys():
return cls.permute_attr(node_name, permutation_sequence, fx_graph, dryrun)
# if we didn't store the attribute already, look in the modules' parameters
is_node_in_all_parameters = False
success_permutation = False
for module_name, module, p_name, p in cls.__all_parameters:
if node_name_matches(node_name, module_name):
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] find the node: \'{:}\' with \'{:}\' in cls.__all_parameters, may succeed to apply permutation in K dim.".format(node_name, p_name))
is_node_in_all_parameters = True
permutation_to_apply = permutation_sequence
if p.shape[0] != len(permutation_sequence): # assumed to be grouped convolutions
if cls.__verbosity > 2 and dryrun:
print(f"Mismatch in K dimension between found module {module_name} {p_name} for node {node_name}: permutation length {len(permutation_sequence)} but parameter shape in K {p.shape[0]}")
if p.shape[0] % len(permutation_sequence) != 0:
return False
permutation_to_apply = replicate_sequence(permutation_sequence, p.shape[0] // len(permutation_sequence))
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] the node: \'{:}\' with shape: \'{:}\' required replicating the permutation sequence with len \'{:}\' {:} times to succeed in applying the permutation in the K dimension.".format(node_name, p.shape, len(permutation_sequence), p.shape[0] // len(permutation_sequence)))
else:
if cls.__verbosity > 1 and dryrun:
print("[apply_permutation_in_K_dim] the node: \'{:}\' with shape: \'{:}\', can match the size of permutation sequence with len: \'{:}\', succeed to apply permutation in K dim.".format(node_name, p.shape, len(permutation_sequence)))
if not dryrun:
p.data.copy_(p[permutation_to_apply, ...])
cls.__params_permuted_in_K.append(node_name + "." + p_name)
success_permutation = True
if not is_node_in_all_parameters:
if cls.__verbosity >= 0:
print("ERROR: [apply_permutation_in _K_dim] cannot find the node: \'{:}\' in cls.__all_parameters, fail to apply permutation in K dim.".format(node_name))
success_permutation = False
return success_permutation
    @classmethod
    def check_graph_for_unpermuted_nodes(cls, fx_graph):
        """Make sure that all permutable nodes/parameters were actually permuted and all GPUs agree

        Any real node that advertised C/K permutability but was neither permuted
        nor part of an explicitly skipped sibling/coparent group is recorded in
        cls.__unpermuted_dims and reported.  In distributed mode every rank
        publishes its sorted list through a TCPStore and rank 0 asserts that all
        ranks produced the identical list.
        """
        for node_name in fx_graph.keys():
            node = fx_graph[node_name]
            # C was allowed but never applied: only an error if the sibling group wasn't deliberately skipped
            if 'C_permutable' in node.keys() and node['C_permutable'] and not node['C_permuted']:
                sibling_group_id = node['sibling_group_id']
                if node['is_real'] and cls.__group_data['skipped_sibling_groups'][sibling_group_id] is None:
                    if cls.__verbosity >= 0:
                        print(f"{node_name} was C_permutable in a not skipped sibling group but was not permuted along C! {node}")
                    cls.__unpermuted_dims.append(node_name + "_C")
            # same check along K, keyed on the coparent group
            if 'K_permutable' in node.keys() and node['K_permutable'] and not node['K_permuted']:
                coparent_group_id = node['coparent_group_id']
                if node['is_real'] and cls.__group_data['skipped_coparent_groups'][coparent_group_id] is None:
                    if cls.__verbosity >= 0:
                        print(f"{node_name} was K_permutable in a not skipped coparent group but was not permuted along K! {node}")
                    cls.__unpermuted_dims.append(node_name + "_K")
        if cls.__verbosity > 0:
            print(f"[check_graph_for_unpermuted_nodes] found nodes that missed permutations along {len(cls.__unpermuted_dims)} dimensions.")
        # make sure all GPUs agree
        if torch.distributed.is_initialized():
            cls.__unpermuted_dims = sorted(cls.__unpermuted_dims)
            rank = torch.distributed.get_rank()
            world_size = torch.distributed.get_world_size()
            # rank 0 hosts the store; every rank publishes its list under its rank id
            dist_store = torch.distributed.TCPStore("127.0.0.1", cls.__tcpstore_port, world_size, rank==0)
            torch.distributed.barrier()
            dist_store.set(str(rank), ','.join(cls.__unpermuted_dims))
            torch.distributed.barrier()
            if rank == 0:
                my_list = dist_store.get('0').decode()
                for peer in range(1, world_size):
                    peer_list = dist_store.get(str(peer)).decode()
                    assert my_list == peer_list, f"peer {peer} disagreed with rank 0's list of unpermuted nodes: \n{my_list}\n{peer_list}"
@classmethod
def find_sparse_parameters_for_node(cls, node_name):
"""If the node has parameters that are in the trackd sparse parameter list, find them and reshape to a 2D tensor with channels last"""
node_weight = None
# check the sparse parameters
for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
if node_name_matches(node_name, module_name):
node_weight = torch.zeros_like(p)
node_weight.copy_(p)
# if we found something, reshape to concatenate along the same dimension
if node_weight is not None:
# Need to handle the concat for layers with different R & S
shape = node_weight.shape
# 1d-tensor
if len(shape) == 1:
node_weight = node_weight.view(1, shape[0])
# 2d-tensor (K, C)
elif len(shape) == 2:
node_weight = node_weight.view(shape[0], shape[1])
# 3d-tensor (K, C, R)
elif len(shape) == 3:
node_weight = node_weight.permute(0,2,1).contiguous().view(shape[0]*shape[2], shape[1])
# 4d-tensor (K, C, R, S)
elif len(shape) == 4:
# convs
node_weight = node_weight.permute(2,3,0,1).contiguous().view(shape[2]*shape[3]*shape[0], shape[1])
return node_weight
    @classmethod
    def find_permutation_for_matrix_group(cls, matrix_group):
        """Find a good permutation for some matrix (which may be concatenated matrices that require the same permutation)

        Returns (group_permutation, permutation_found).  If 2:4 pruning of the
        unpermuted matrix already preserves (almost) all magnitude, the search is
        skipped and the identity permutation is returned with found=False.
        """
        if cls.__verbosity > 1:
            print(f"Searching for a good permutation for this sibling group of shape {matrix_group.shape}")
        permutation_found = False
        num_channels = matrix_group.shape[1]
        # start from the identity permutation
        group_permutation = [c for c in range(num_channels)]
        # automatic check for skipping the permutation search process
        original_magnitude = (torch.abs(matrix_group)).sum(dtype=torch.float64)
        pruned_magnitude = sum_after_2_to_4(matrix_group.cpu().detach().numpy())
        diff_ratio = abs(original_magnitude - pruned_magnitude)/original_magnitude
        # relative magnitude loss below epsilon means a permutation cannot help much
        epsilon = 1e-3
        if cls.__verbosity > 1:
            print("\n[search_for_good_permutation] Original element abs sum: {:}, Pruned element abs sum: {:}, Diff ratio: {:}".format(original_magnitude, pruned_magnitude, diff_ratio))
        start_time_accelerated_search_for_good_permutation = time.perf_counter()
        if diff_ratio < epsilon:
            if cls.__verbosity > 2:
                print("[search_for_good_permutation] Original element abs sum is almost same as the pruned element abs sum, further permutation search will not help, skipping!")
        else:
            if cls.__verbosity > 2:
                print("[search_for_good_permutation] Original element abs sum is different from the pruned element abs sum, further permutation search will help, continue with the permutation search!")
            # call the permutation search CUDA kernels as ASP extension.
            # users can provide prefer search strategy by providing a valid 'search_options' as a dictionary,
            # or users can implement their customized 'accelerated_search_for_good_permutation' function.
            search_options = {}
            # No.1 Strategy: Exhaustive Search
            search_options['strategy'] = 'exhaustive'
            search_options['stripe_group_size'] = 8
            search_options['escape_attempts'] = 100
            # No.2 Strategy: Progressive Channel Swap Search
            # search_options['strategy'] = 'progressive channel swap'
            # search_options['progressive_search_time_limit'] = 10
            # search_options['improvement_threshold'] = 1e-9
            # permutation search time is too long for matrix_group with large channel num
            # change from Exhaustive Search to Progressive Channel Swap Search based on input matrix_group size
            if num_channels > 2048:
                search_options = {}
                search_options['strategy'] = 'progressive channel swap'
                search_options['progressive_search_time_limit'] = 120
                search_options['improvement_threshold'] = 1e-9
            if cls.__verbosity > 1:
                print(f"[search_for_good_permutation] search options: {search_options}")
            group_permutation = accelerated_search_for_good_permutation(matrix_group, options=search_options, verbosity=cls.__verbosity)
            permutation_found = True
        if cls.__verbosity > 1:
            duration_accelerated_search_for_good_permutation = time.perf_counter() - start_time_accelerated_search_for_good_permutation
            permuted_magnitude = sum_after_2_to_4(matrix_group.cpu().detach().numpy()[:,group_permutation])
            print("[search_for_good_permutation] Take {:.4f} seconds to finish accelerated_search_for_good_permutation function and with final magnitude {:}.".format(duration_accelerated_search_for_good_permutation, permuted_magnitude))
        return group_permutation, permutation_found
@classmethod
def skip_sibling_group(cls, fx_graph, sibling_group_id, reason):
"""Keep track of sibling groups that do not have permutations applied"""
# grab a parent to get the coparent group id
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
a_sibling = list(sibling_group)[0]
a_parent = fx_graph[a_sibling]['real_parents'][0]
coparent_group_id = fx_graph[a_parent]['coparent_group_id']
if cls.__verbosity > 1:
print(f"Skipping permutations for Sibling Group {sibling_group_id} and Coparent Group {coparent_group_id}: {reason}")
cls.__group_data['skipped_sibling_groups'][sibling_group_id] = reason
cls.__group_data['skipped_coparent_groups'][coparent_group_id] = reason
@classmethod
def collect_sparse_weights(cls, fx_graph, sibling_group, sibling_group_C_param):
"""Gather all sparse weights for a sibling group (to serve as input to the permutation search)"""
matrix_group = None
for sibling in sibling_group:
node_weight = cls.find_sparse_parameters_for_node(sibling)
if node_weight is not None:
# reshape due to siblings with grouped convolutions of different sizes
assert node_weight.shape[1] % sibling_group_C_param == 0, f"sibling {sibling}'s weights' C={node_weight.shape[1]} must be even multiple of the sibling group's C parameter {sibling_group_C_param}"
node_weight = torch.reshape(node_weight, (-1, sibling_group_C_param))
if matrix_group is None:
matrix_group = node_weight
else:
try:
matrix_group = torch.cat((matrix_group, node_weight), dim = 0) # concat the weights in the K dimension, keep the same C dimension
except:
if cls.__verbosity >= 0:
print("ERROR: [search_for_good_permutation][warning] cannot merge the weight for node: \'{:}\', with its weight shape: \'{:}\', the matrix_group shape: \'{:}\'.".format(sibling, node_weight.size(), matrix_group.size()))
continue
if cls.__verbosity > 2:
print("[search_for_good_permutation] have merged the weight for node: \'{:}\', with its weight shape: \'{:}\', the matrix_group shape: \'{:}\'.".format(sibling, node_weight.size(), matrix_group.size()))
else:
if cls.__verbosity > 2:
print(f"[search_for_good_permutation] not adding dense weights for node {sibling} to the group")
return matrix_group
    @classmethod
    def find_sibling_group_permutation(cls, fx_graph, sibling_group_id):
        """Find a good permutation for some sibling group.

        Skips (and records why) when the group's C parameter cannot support 2:4
        permutations, when no sibling is sparse, or when the search decides a
        permutation is not needed; otherwise stores the found permutation in
        cls.__group_data['sibling_group_permutations'].
        """
        if cls.__verbosity > 1:
            print(f"Finding permutation for sibling group {sibling_group_id}")
        # keep the search deterministic across calls/ranks
        cls.reset_seed()
        sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
        sibling_group_C_param = int(cls.__group_data['sibling_group_C_params'][sibling_group_id])
        # fewer than two 2:4 groups (C < 8) or a C not divisible by 4 can't benefit
        if sibling_group_C_param % 4 != 0 or sibling_group_C_param < 8:
            cls.skip_sibling_group(fx_graph, sibling_group_id, f"Useless C: {sibling_group_C_param}")
            return
        # collect *sparse* weights from all siblings, get the coparent group
        matrix_group = cls.collect_sparse_weights(fx_graph, sibling_group, sibling_group_C_param)
        # early-out if no siblings are sparse
        if matrix_group is None:
            cls.skip_sibling_group(fx_graph, sibling_group_id, 'Dense')
            return
        # find a good permutation
        group_permutation, found = cls.find_permutation_for_matrix_group(matrix_group)
        # if no permutation was found, we didn't need it (input already sparse)
        if not found:
            cls.skip_sibling_group(fx_graph, sibling_group_id, 'Not needed')
            return
        if cls.__verbosity > 2:
            print(f"Permutation for sibling group {sibling_group_id}: {group_permutation}")
        cls.__group_data['sibling_group_permutations'][sibling_group_id] = group_permutation
    @classmethod
    def permute_sibling_group(cls, fx_graph, sibling_group_id, group_permutation):
        """Apply a permutation to some sibling group.

        Runs twice: a dry run first to detect problems without touching weights;
        only if the dry run fully succeeds is the permutation actually applied on
        the second pass (where any failure is a hard assertion).  On a dry-run
        failure the whole group is skipped to preserve network quality.
        """
        if cls.__verbosity > 1:
            print(f"Attempting to permute sibling group {sibling_group_id}")
        sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
        # apply the permutation in two steps: first, a dry run to find any issues.
        # if there were no issues, actually apply the permutation in the second step.
        success = True
        coparent_group_id = None
        for dryrun in [True, False]:
            # apply that permutation to the siblings' C dimension
            for sibling in sibling_group:
                assert fx_graph[sibling]['C_permutable'] and not fx_graph[sibling]['C_permuted']
                sibling_permuted = cls.apply_permutation_in_C_dim(sibling, group_permutation, dryrun)
                if dryrun:
                    success = success and sibling_permuted
                else:
                    assert sibling_permuted, "shouldn't fail permuting siblings after the dry run"
                    fx_graph[sibling]['C_permuted'] = sibling_permuted
                # all siblings must share one coparent group; remember/verify it
                a_parent = fx_graph[sibling]['real_parents'][0]
                if coparent_group_id is None:
                    coparent_group_id = fx_graph[a_parent]['coparent_group_id']
                else:
                    assert coparent_group_id == fx_graph[a_parent]['coparent_group_id'], f"parent {a_parent} must belong to the same coparent group {coparent_group_id}, not {fx_graph[a_parent]['coparent_group_id']}"
            # grab the parents (and co-parents) and apply to their K dimension
            coparents = cls.__group_data['coparent_groups'][coparent_group_id]
            for coparent in coparents:
                assert fx_graph[coparent]['K_permutable'] and not fx_graph[coparent]['K_permuted']
                coparent_permuted = cls.apply_permutation_in_K_dim(coparent, group_permutation, fx_graph, dryrun)
                if dryrun:
                    success = success and coparent_permuted
                else:
                    assert coparent_permuted, "shouldn't fail permuting coparents after the dry run"
                    fx_graph[coparent]['K_permuted'] = coparent_permuted
                # K_passthru children of each coparent must be permuted along K as well
                children_permuted = cls.apply_permutation_in_K_dim_to_children(fx_graph, coparent, group_permutation, dryrun)
                if dryrun:
                    success = success and children_permuted
                else:
                    assert children_permuted, "shouldn't fail permuting coparents' children after the dry run"
            if not success:
                cls.skip_sibling_group(fx_graph, sibling_group_id, "dryrun_failure")
                if cls.__verbosity > 0:
                    print(f"There was an issue permuting sibling group {sibling_group_id}, skipping it to preserve network quality.")
                break
    @classmethod
    def apply_permutation_in_K_dim_to_children(cls, fx_graph, node_name, permutation, dryrun):
        """Apply a permutation along K to the children of some node.

        Recurses through non-module and K_passthru children; stops at "real"
        children (they own their own C permutation instead).  Returns True only
        if every reachable child accepted the permutation.
        """
        success = True
        children = fx_graph[node_name]['children']
        if cls.__verbosity > 2 and dryrun:
            print(f"Applying a permutation in K to children of {node_name} : {children}")
        # apply the permutation along K to children as necessary
        for child in children:
            if 'is_real' in fx_graph[child].keys() and fx_graph[child]['is_real']:
                if cls.__verbosity > 3 and dryrun:
                    print(f"\tFound a real child {child}, not permuting it or its children along K")
            else:
                if 'module_type' not in fx_graph[child].keys() or fx_graph[child]['module_type'] == 'None':
                    if cls.__verbosity > 3 and dryrun:
                        print(f"\tPermuting children of non-module {child} along K")
                    # NOTE(review): `and` short-circuits, so once success is False the
                    # recursion is skipped; harmless because the real pass only runs
                    # after a fully successful dry run — confirm if call sites change
                    success = success and cls.apply_permutation_in_K_dim_to_children(fx_graph, child, permutation, dryrun)
                elif not fx_graph[child]['C_permutable']:
                    if fx_graph[child]['K_permutable'] and not fx_graph[child]['K_permuted']:
                        if cls.__verbosity > 2 and dryrun:
                            print(f"\tPermuting {child} along K")
                        child_permuted = cls.apply_permutation_in_K_dim(child, permutation, fx_graph, dryrun)
                        success = success and child_permuted
                        if not dryrun:
                            fx_graph[child]['K_permuted'] = child_permuted
                        # only K_passthru nodes should reach this branch
                        assert fx_graph[child]['K_passthru']
                    if fx_graph[child]['K_passthru']:
                        success = success and cls.apply_permutation_in_K_dim_to_children(fx_graph, child, permutation, dryrun)
                    else:
                        if cls.__verbosity >= 0:
                            print(f"\t!! ERROR {child} was a not real module that was not K_passthru")
        return success
@classmethod
def defer_prints(cls):
"""Collect prints from this rank in distributed mode to avoid interleaved output"""
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
cls.__new_stdout = io.StringIO(str(torch.distributed.get_rank()))
cls.__builtin_print = __builtin__.print
def deferred_print(*args, **kwargs):
try: # see if torchvision examples has suppressed other ranks with the force argument
cls.__builtin_print(*args, file=cls.__new_stdout, force=True, **kwargs)
except:
cls.__builtin_print(*args, file=cls.__new_stdout, **kwargs)
__builtin__.print = deferred_print
@classmethod
def resume_prints(cls):
"""Emit the collected outputs from this rank, resume immediate printing"""
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
output = cls.__new_stdout.getvalue()
__builtin__.print = cls.__builtin_print
try:
print(output, force=True)
except:
print(output)
@classmethod
def find_permutations(cls, fx_graph):
"""Search for permutations for all sibling groups"""
for sibling_group_id in cls.__group_data['sibling_groups'].keys():
search_this_group = True
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
if sibling_group_id % world_size != rank:
search_this_group = False
cls.__group_data['sibling_group_permutations'][sibling_group_id] = None
if search_this_group:
cls.defer_prints()
sibling_group = cls.__group_data['sibling_groups'][sibling_group_id]
test_node_name = list(sibling_group)[0]
if not fx_graph[test_node_name]['C_permutable']:
if cls.__verbosity > 1:
print(f"Skipping permutation for sibling group {sibling_group_id} since it does not allow permutations along C")
else:
if cls.__verbosity > 1:
print(f"Sibling group {sibling_group_id} can permute along C, permuting it")
cls.find_sibling_group_permutation(fx_graph, sibling_group_id)
cls.resume_prints()
return fx_graph
    @classmethod
    def sync_permutations(cls, fx_graph):
        """If multiple GPUs were involved in finding permutations, make sure everyone's in sync.

        Each group's owning rank publishes via TCPStore either a comma-separated
        permutation, '' (searched, none needed), or 'skip' plus a separate
        "skip <id>" reason entry; every rank then reads all entries back.
        """
        if not torch.distributed.is_initialized():
            return fx_graph
        rank = torch.distributed.get_rank()
        world_size = torch.distributed.get_world_size()
        # rank 0 hosts the store, all ranks connect
        dist_store = torch.distributed.TCPStore("127.0.0.1", cls.__tcpstore_port, world_size, rank==0)
        if cls.__verbosity > 0:
            print(f"Syncing permutations found among world size {world_size}")
        torch.distributed.barrier()
        for sibling_group_id in sorted(cls.__group_data['sibling_groups'].keys()):
            # ownership mirrors the round-robin used in find_permutations
            src_rank = sibling_group_id % world_size
            if src_rank == rank:
                to_send = cls.__group_data['sibling_group_permutations'].get(sibling_group_id, None)
                skip_reason = None
                if to_send is None:
                    skip_reason = cls.__group_data['skipped_sibling_groups'].get(sibling_group_id, None)
                    if skip_reason is None:
                        to_send = ''
                    else:
                        to_send = 'skip'
                else:
                    to_send = ','.join(str(c) for c in to_send)
                dist_store.set(str(sibling_group_id), to_send)
                if skip_reason is not None:
                    dist_store.set(f"skip {sibling_group_id}", skip_reason)
                if cls.__verbosity > 1:
                    print(f"{rank}: stored permutation for sibling group {sibling_group_id}", force=True)
        torch.distributed.barrier()
        # every rank reads every group's result back and mirrors the owner's state
        for sibling_group_id in sorted(cls.__group_data['sibling_groups'].keys()):
            permutation = dist_store.get(str(sibling_group_id)).decode()
            if permutation == 'skip':
                permutation = None
                skip_reason = dist_store.get(f"skip {sibling_group_id}").decode()
                cls.skip_sibling_group(fx_graph, sibling_group_id, skip_reason)
            else:
                if len(permutation) == 0:
                    permutation = None
                else:
                    permutation = [int(c) for c in permutation.split(',')]
                cls.__group_data['sibling_group_permutations'][sibling_group_id] = permutation
                if cls.__verbosity > 1:
                    print(f"Got permutation for sibling group {sibling_group_id}")
        torch.distributed.barrier()
        return fx_graph
@classmethod
def apply_permutations(cls, fx_graph):
"""Apply all the permutations that were found to the network appropriately"""
for sibling_group_id in cls.__group_data['sibling_group_permutations'].keys():
permutation = cls.__group_data['sibling_group_permutations'][sibling_group_id]
if permutation is not None:
cls.permute_sibling_group(fx_graph, sibling_group_id, permutation)
return fx_graph
@staticmethod
def insert_MHA_out_proj(fx_graph, MHA_node, verbosity):
"""MHA nodes have a hidden out_proj node, so insert it and fix up neighboring nodes"""
if verbosity > 1:
print(f"Inserting MHA out_proj for node {MHA_node}")
out_proj_node_name = MHA_node + ".out_proj"
# insert the new node
fx_graph[out_proj_node_name] = {}
fx_graph[out_proj_node_name]['parents'] = [MHA_node]
fx_graph[out_proj_node_name]['children'] = fx_graph[MHA_node]['children']
fx_graph[MHA_node]['children'] = [out_proj_node_name]
# set the new node's properties
fx_graph[out_proj_node_name]['fx_op'] = 'call_module'
fx_graph[out_proj_node_name]['module_type'] = 'torch.nn.modules.linear.Linear'
fx_graph[out_proj_node_name]['groups_param'] = 'None'
fx_graph[out_proj_node_name]['C_param'] = fx_graph[MHA_node]['C_param']
fx_graph[out_proj_node_name]['K_param'] = fx_graph[MHA_node]['K_param']
fx_graph[out_proj_node_name]['sibling_group_id'] = None
fx_graph[out_proj_node_name]['coparent_group_id'] = None
# set permutation flags
fx_graph[out_proj_node_name]['C_permutable'] = False
fx_graph[MHA_node]['K_permutable'] = False
fx_graph[MHA_node]['C_permutable'] = True
fx_graph[out_proj_node_name]['K_permutable'] = True
fx_graph[out_proj_node_name]['K_passthru'] = False
fx_graph[out_proj_node_name]['C_permuted'] = False
fx_graph[out_proj_node_name]['K_permuted'] = False
fx_graph[out_proj_node_name]['is_real'] = True
if verbosity > 2:
print(f"\tUpdated: {MHA_node}: {fx_graph[MHA_node]}")
print(f"\tAdded: {out_proj_node_name}: {fx_graph[out_proj_node_name]}")
# update any nodes that thought their parent was the MHA node
for node in fx_graph.keys():
parents = fx_graph[node]['parents']
if node != out_proj_node_name and MHA_node in parents:
parents.remove(MHA_node)
parents.append(out_proj_node_name)
fx_graph[node]['parents'] = parents
if verbosity > 2:
print(f"\tUpdated parents of {node}: {fx_graph[node]}")
return fx_graph
@staticmethod
def init_grouped_conv_permutation_flags(fx_graph, node_name, node_groups, verbosity):
"""Handle grouped convolutions to make dimensions match"""
node_C = int(fx_graph.get(node_name).get('C_param'))
node_K = int(fx_graph.get(node_name).get('K_param'))
node_groups = int(node_groups)
if verbosity > 2:
print(f"\t{node_name} pre-divide C: {node_C}, K: {node_K}, G: {node_groups}")
assert node_C % node_groups == 0
node_C = int(node_C / node_groups)
fx_graph[node_name]['C_param'] = str(node_C)
if verbosity > 2:
print(f"\t{node_name} post-divide C: {node_C}, K: {node_K}, G: {node_groups}")
if node_C == 1: # G == C (C is pre-divided by G)
if node_groups == node_K: # true depthwise, G == C == K (C will be pre-divided by G)
fx_graph[node_name]['K_permutable'] = True
fx_graph[node_name]['K_permuted'] = False
fx_graph[node_name]['K_passthru'] = True
fx_graph[node_name]['is_real'] = False
#else: # G != K, handling a permutation along K would be very tricky and not likely useful
else: # G != C
if node_C > 4 and node_C % 4 == 0: # permutations only help if there's more than one 2:4 pruning group
fx_graph[node_name]['C_permutable'] = True
fx_graph[node_name]['C_permuted'] = False
    @classmethod
    def init_permutation_flags(cls, fx_graph):
        """Set the permutation flags for each node based only on that node's module type and parameters.

        Resets cls.__group_data, then stamps every node with C_permutable /
        K_permutable / K_passthru / is_real defaults and relaxes them per module
        type.  MHA nodes get their hidden out_proj inserted afterwards.
        """
        if cls.__verbosity > 0:
            print("\n[init_permutation_flags] Initialize the permutation flags for each node according to module type and parameters")
        # initialize some graph-wide trackers
        cls.__group_data = {}
        cls.__group_data['next_sibling_group_id'] = 0
        cls.__group_data['next_coparent_group_id'] = 0
        cls.__group_data['sibling_groups'] = {}
        cls.__group_data['sibling_group_permutations'] = {}
        cls.__group_data['sibling_group_C_params'] = {}
        cls.__group_data['skipped_sibling_groups'] = {}
        cls.__group_data['coparent_groups'] = {}
        cls.__group_data['skipped_coparent_groups'] = {}
        # track MHA nodes
        MHA_nodes = []
        # initialize each node's details
        for node_name in fx_graph.keys():
            fx_node = fx_graph.get(node_name)
            node_module_type = fx_node.get('module_type')
            if cls.__verbosity > 1:
                if node_module_type == 'get_attr':
                    print(f"Initializing node {node_name} of type {node_module_type}")
                else:
                    print(f"Initializing node {node_name} of type {node_module_type}: {fx_node}")
            # default for all nodes: don't allow anything
            if node_module_type is not None:
                fx_graph[node_name]['C_permutable'] = False # does this node have parameters that can be permuted in C
                fx_graph[node_name]['K_permutable'] = False # does this node have parameters that can be permuted in K
                fx_graph[node_name]['K_passthru'] = False # does this node need to pass a K permutation to its parents
                fx_graph[node_name]['is_real'] = False
                fx_graph[node_name]['C_permuted'] = False
                fx_graph[node_name]['K_permuted'] = False
                # initialize sibling and coparent groups
                fx_graph[node_name]['sibling_group_id'] = None
                fx_graph[node_name]['coparent_group_id'] = None
            # update each node to be more permissive if supported
            if node_module_type in cls.__permutation_target_module_types:
                fx_graph[node_name]['is_real'] = True
                node_groups = fx_graph.get(node_name).get('groups_param')
                if (node_groups in ['None', '1']): # no groups, no constraints
                    fx_graph[node_name]['C_permutable'] = True
                    fx_graph[node_name]['K_permutable'] = True
                else: # handle groups
                    Permutation.init_grouped_conv_permutation_flags(fx_graph, node_name, node_groups, cls.__verbosity)
            elif node_module_type in cls.__permute_K_and_passthru_module_types:
                fx_graph[node_name]['K_permutable'] = True
                fx_graph[node_name]['K_passthru'] = True
                fx_graph[node_name]['is_real'] = False
            elif node_module_type in cls.__simple_passthru_module_types:
                fx_graph[node_name]['K_passthru'] = True
                fx_graph[node_name]['is_real'] = False
            elif node_module_type in cls.__disallow_permutations_module_types:
                fx_graph[node_name]['is_real'] = True
                fx_graph[node_name]['C_param'] = 1
                fx_graph[node_name]['K_param'] = 1
                fx_graph[node_name]['groups_param'] = 1
            # NOTE(review): this membership test assumes node_module_type is a string
            # by the time no earlier branch matched; a None module_type reaching here
            # would raise TypeError — confirm upstream always sets module_type
            elif 'activation' in node_module_type:
                if cls.__verbosity > 0:
                    print(f"WARNING: how should permutation flags be initialized for node {node_name} of module type {node_module_type}? Found 'activation', assuming simple passthru behavior.")
                fx_graph[node_name]['K_passthru'] = True
                fx_graph[node_name]['is_real'] = False
            else:
                if cls.__verbosity > 0:
                    print(f"WARNING: how should permutation flags be initialized for node {node_name} of module type {node_module_type}? Defaulting to strict, disallowing permutations around it.")
                # is_real coupled with disallowed C and K permutations will poison real parents and real children
                fx_graph[node_name]['is_real'] = True
                # dummy entries:
                fx_graph[node_name]['C_param'] = 1
                fx_graph[node_name]['K_param'] = 1
                fx_graph[node_name]['groups_param'] = 1
            # MHA nodes only handle the in_proj, need to add out_proj nodes explicitly
            # keep track here so we can iterate directly and change fx_graph keys
            if node_module_type == 'torch.nn.modules.activation.MultiheadAttention':
                MHA_nodes.append(node_name)
            if cls.__verbosity > 1:
                if node_module_type == 'get_attr':
                    print(f"\tInitialized node {node_name} of type {node_module_type}")
                else:
                    print(f"\tInitialized node {node_name} of type {node_module_type}: {fx_graph[node_name]}")
        for MHA_node in MHA_nodes:
            fx_graph = Permutation.insert_MHA_out_proj(fx_graph, MHA_node, cls.__verbosity)
        return fx_graph
@staticmethod
def collect_siblings(fx_graph, node_name, all_siblings):
"""Recursively build a set of some node's siblings in the graph"""
# find all siblings of the requested node
siblings = set()
parents = fx_graph.get(node_name).get('real_parents')
for parent in parents:
children = fx_graph.get(parent).get('real_children')
for child in children:
siblings.add(child)
# separate the new siblings, since we'll need to process them recursively
new_siblings = siblings.difference(all_siblings)
# update the final list with just the new elements
all_siblings.update(new_siblings)
for new_sibling in new_siblings:
all_siblings = Permutation.collect_siblings(fx_graph, new_sibling, all_siblings)
return all_siblings
@staticmethod
def propagate_sibling_group(fx_graph, all_siblings, verbosity):
"""Check a sibling group for ability to be permuted, disallow all siblings and coparents if there's an issue"""
made_change = False
allow_C = True
for sibling in all_siblings:
pre_check = allow_C
allow_C = allow_C and fx_graph[sibling]['C_permutable']
if allow_C != pre_check:
if verbosity > 2:
if fx_graph[sibling]['module_type'] == 'get_attr':
print(f"\tnode {sibling} has poisoned the sibling group of {all_siblings}")
else:
print(f"\tnode {sibling} has poisoned the sibling group of {all_siblings}: {fx_graph[sibling]}")
break
if not allow_C:
for sibling in all_siblings:
made_change = made_change or fx_graph[sibling]['C_permutable']
fx_graph[sibling]['C_permutable'] = False
# only disable permutation along K for parents if this node cannot passthru, either
if not fx_graph[sibling]['K_passthru']:
sibling_parents = fx_graph[sibling]['real_parents']
for sibling_parent in sibling_parents:
made_change = made_change or fx_graph[sibling_parent]['K_permutable'] or fx_graph[sibling_parent]['K_passthru']
fx_graph[sibling_parent]['K_permutable'] = False
fx_graph[sibling_parent]['K_passthru'] = False
return made_change
    @staticmethod
    def collect_coparents(fx_graph, node_name, all_coparents):
        """Recursively build a set of all coparents of a particular node in the graph.

        Coparents are every real parent of every real child; K_passthru parents
        also pull in their own parents' coparents, since a K permutation flows
        through them.
        """
        # find all coparents of the requested node
        coparents = set()
        children = fx_graph.get(node_name).get('real_children')
        for child in children:
            parents = fx_graph.get(child).get('real_parents')
            for parent in parents:
                coparents.add(parent)
                # coparents are used to restrict what nodes can be permuted along C, so we need to track if the current parents also pass their K permutations up
                if fx_graph[parent]['K_passthru']:
                    grandparents = fx_graph[parent]['real_parents']
                    for grandparent in grandparents:
                        coparents = coparents.union(Permutation.collect_coparents(fx_graph, grandparent, coparents))
        # separate the new coparents, since we'll need to process them recursively
        new_coparents = coparents.difference(all_coparents)
        # update the final list with just the new elements
        all_coparents.update(new_coparents)
        for new_coparent in new_coparents:
            all_coparents = Permutation.collect_coparents(fx_graph, new_coparent, all_coparents)
        return all_coparents
@staticmethod
def propagate_coparent_group(fx_graph, all_coparents, verbosity):
"""Check a coparent group for ability to be permuted, disallow all fellow coparents and children if there's an issue"""
# see if all coparents agree that K can be permuted
allow_K = True
made_change = False
for coparent in all_coparents:
pre_check = allow_K
allow_K = allow_K and (fx_graph[coparent]['K_permutable'] or fx_graph[coparent]['K_passthru'])
if allow_K != pre_check:
if verbosity > 2:
if fx_graph[coparent]['module_type'] == 'get_attr':
print(f"\tnode {coparent} has poisoned the coparent group of {all_coparents}")
else:
print(f"\tnode {coparent} has poisoned the coparent group of {all_coparents}: {fx_graph[coparent]}")
break
# if anyone says no, force everyone to 'no', keep track of updated state
if not allow_K:
for coparent in all_coparents:
# all coparents can no longer be permuted along K
if fx_graph[coparent]['K_permutable'] or fx_graph[coparent]['K_passthru']:
made_change = True
fx_graph[coparent]['K_permutable'] = False
fx_graph[coparent]['K_passthru'] = False
# children of coparents can't be permuted along C
coparent_children = fx_graph[coparent]['real_children']
for coparent_child in coparent_children:
if fx_graph[coparent_child]['C_permutable']:
fx_graph[coparent_child]['C_permutable'] = False
made_change = True
return made_change
    @classmethod
    def fixup_concats(cls, fx_graph):
        """concat operations/modules may concatenate along the channel dimension, which requires special handling (like grouped convs)"""
        if cls.__verbosity > 0:
            print("[fixup_concats]")
        for node_name in fx_graph.keys():
            fx_node = fx_graph[node_name]
            if fx_node.get('module_type') == 'concat':
                # get real parents, find GCD of their Ks
                node_real_parents = fx_node['real_parents']
                # some concats are at the front of networks (googlenet)
                if len(node_real_parents) == 0:
                    continue
                parents_K_params = []
                for parent in node_real_parents:
                    parent_K_param = int(fx_graph[parent]['K_param'])
                    parents_K_params.append(parent_K_param)
                    # a concat parent's K legitimately differs from the concat's C;
                    # mark it so enforce_dimension_agreement skips the mismatch check
                    fx_graph[parent]['allow_K_mismatch'] = 'concat op'
                # if grouped convolutions make the input channels different among siblings different sizes,
                # restrict the permutation atom to the greatest common divisor so it can be tiled as needed for each sibling (and parent)
                if cls.__verbosity > 2:
                    print(f"\tfixing up concat node {node_name}, found parents' {node_real_parents} Ks: {parents_K_params}")
                children_GCD_param = str(np.gcd.reduce(parents_K_params))
                # set this to GCD of children's sibling group
                sibling_group_id = -1
                node_real_children = fx_node['real_children']
                for child in node_real_children:
                    sibling_group_id = fx_graph[child]['sibling_group_id']
                    fx_graph[child]['C_param'] = children_GCD_param
                # NOTE(review): if the concat had no real children, sibling_group_id would
                # stay -1 and the lookup below would KeyError — presumably concats reaching
                # this point always have children; confirm
                old_children_GCD = cls.__group_data['sibling_group_C_params'][sibling_group_id]
                cls.__group_data['sibling_group_C_params'][sibling_group_id] = children_GCD_param
                # fixup this node's dimensions
                # use the functionality of grouped convolutions
                fx_node['C_param'] = children_GCD_param
                fx_node['K_param'] = old_children_GCD
                fx_node['groups_param'] = str(int(old_children_GCD) // int(children_GCD_param))
                if cls.__verbosity > 2:
                    print(f"\tfixed up concat node {node_name}, found GCD of parents' {node_real_parents} K to be {children_GCD_param}, updated children's {node_real_children} C_params and sibling group {sibling_group_id} GCD")
                    print(f"\tthis node now: {fx_node}")
        return fx_graph
@classmethod
def enforce_dimension_agreement(cls, fx_graph):
"""Check all nodes' channel dimensions against parents and children to make sure they agree; e.g. flatten ops may change these dimensions"""
if cls.__verbosity > 0:
print("[enforce_dimension_agreement]")
for node_name in fx_graph.keys():
fx_node = fx_graph[node_name]
if 'is_real' in fx_node.keys() and fx_node['is_real']:
# enforce this node's input dimension matches its parents' output dimensions
node_C = int(fx_node['C_param'])
node_K = int(fx_node['K_param'])
if fx_graph[node_name]['groups_param'] not in ['1', 'None']:
node_C = node_C * int(fx_node['groups_param'])
node_real_parents = fx_node['real_parents']
if len(node_real_parents) == 0:
if cls.__verbosity > 1:
print(f"\t{node_name} has no real parents, disabling permutations along C")
fx_graph[node_name]['C_permutable'] = False
else:
for real_parent in node_real_parents:
parent_K = int(fx_graph[real_parent]['K_param'])
ignore_mismatch = fx_graph[real_parent].get('allow_K_mismatch')
if ignore_mismatch is not None:
if cls.__verbosity > 1:
print(f"\tIgnoring dimension mismatch between {node_name} (C={node_C}) and its parent {real_parent} (K={parent_K}) as requested: {ignore_mismatch}")
elif parent_K >= 0 and node_C != parent_K:
if cls.__verbosity > 1:
print(f"\tDimensions mismatch between {node_name} (C={node_C}) and its parent {real_parent} (K={parent_K}), disallowing the relevant permutations")
fx_graph[node_name]['C_permutable'] = False
fx_graph[real_parent]['K_permutable'] = False
if cls.__verbosity > 2:
print(f"\t{fx_graph[node_name]}\n\t{fx_graph[real_parent]}")
if len(fx_graph[node_name]['real_children']) == 0:
if cls.__verbosity > 1:
print(f"\t{node_name} has no real children, disabling permutations along K")
fx_graph[node_name]['K_permutable'] = False
return fx_graph
    @classmethod
    def make_sibling_coparent_groups(cls, fx_graph):
        """Traverse all real nodes in the graph and collect their siblings and coparents"""
        if cls.__verbosity > 0:
            print("[make_sibling_coparent_groups]")
        for node_name in fx_graph.keys():
            fx_node = fx_graph[node_name]
            if 'is_real' in fx_node.keys() and fx_node['is_real']:
                sibling_group_id = fx_node['sibling_group_id']
                if sibling_group_id is None: # need to make a new sibling group for this node
                    all_siblings = cls.collect_siblings(fx_graph, node_name, set([node_name]))
                    all_siblings = sorted(all_siblings) # deterministic order for DDP setups
                    sibling_group_id = cls.__group_data['next_sibling_group_id']
                    cls.__group_data['sibling_groups'][sibling_group_id] = all_siblings
                    cls.__group_data['next_sibling_group_id'] = sibling_group_id + 1
                    sibling_group_C_params = []
                    for sibling in all_siblings:
                        # every member shares the same group id so later passes skip re-collection
                        fx_graph[sibling]['sibling_group_id'] = sibling_group_id
                        sibling_C_param = int(fx_graph[sibling]['C_param'])
                        sibling_group_C_params.append(sibling_C_param)
                    # if grouped convolutions make the input channels different among siblings different sizes,
                    # restrict the permutation atom to the greatest common divisor so it can be tiled as needed for each sibling (and parent)
                    sibling_group_C_param = str(np.gcd.reduce(sibling_group_C_params))
                    cls.__group_data['sibling_group_C_params'][sibling_group_id] = sibling_group_C_param
                    cls.__group_data['skipped_sibling_groups'][sibling_group_id] = None
                    if cls.__verbosity > 1:
                        print(f"New sibling group {sibling_group_id} with GCD(C) of {sibling_group_C_param}: {all_siblings}")
                coparent_group_id = fx_node['coparent_group_id']
                if coparent_group_id is None:
                    # coparents feed a common child and therefore must agree on K handling
                    all_coparents = cls.collect_coparents(fx_graph, node_name, set([node_name]))
                    coparent_group_id = cls.__group_data['next_coparent_group_id']
                    cls.__group_data['coparent_groups'][coparent_group_id] = all_coparents
                    cls.__group_data['next_coparent_group_id'] = coparent_group_id + 1
                    cls.__group_data['skipped_coparent_groups'][coparent_group_id] = None
                    for coparent in all_coparents:
                        fx_graph[coparent]['coparent_group_id'] = coparent_group_id
                    if cls.__verbosity > 1:
                        print(f"New coparent group {coparent_group_id}: {all_coparents}")
        return fx_graph
    @classmethod
    def propagate_permutation_flags(cls, fx_graph):
        """Disallow sibling groups from having different C_permutable flags and coparent groups from having different K_permutable flags within the groups"""
        made_change = True # will we need to repeat this propagation?
        # TODO: just propagate to sibling groups and coparent groups directly, instead of iteratively to direct real_parents and siblings
        # iterate to a fixed point: disabling one flag can trigger further disabling
        while made_change:
            made_change = False
            if cls.__verbosity > 0:
                print("Making a pass at propagating permutation flags")
            for node_name in fx_graph.keys():
                fx_node = fx_graph.get(node_name)
                node_parents = fx_graph.get(node_name).get('parents')
                node_real_parents = fx_graph.get(node_name).get('real_parents')
                node_children = fx_graph.get(node_name).get('children')
                node_real_children = fx_graph.get(node_name).get('real_children')
                # input layers can't be permuted along C without a runtime fixup, skip them
                # NOTE(review): 'x' is assumed to be the traced graph's input placeholder name — confirm;
                # also, a node with no 'parents' key would set made_change every pass (infinite loop) —
                # presumably build_fx_graph always populates 'parents'
                if node_parents is None or ('x' in node_parents and 'C_permutable' in fx_graph[node_name].keys() and fx_graph[node_name]['C_permutable']):
                    if cls.__verbosity > 1:
                        print(f"{node_name} has no parents, or only an input, disabling permutations in C")
                    made_change = True
                    fx_graph[node_name]['C_permutable'] = False
                # output layers can't be permuted along K without a runtime fixup, skip them
                if node_children is None or ('output' in node_children and 'K_permutable' in fx_graph[node_name].keys() and fx_graph[node_name]['K_permutable']):
                    if cls.__verbosity > 1:
                        print(f"{node_name} has no children, or only an output, disabling permutations in K")
                    made_change = True
                    fx_graph[node_name]['K_permutable'] = False
                    fx_graph[node_name]['K_passthru'] = False
                if 'is_real' in fx_node.keys() and fx_node['is_real']:
                    # siblings must share C-flags; if one cannot be permuted along C, none can
                    sibling_group_id = fx_graph[node_name]['sibling_group_id']
                    all_siblings = cls.__group_data['sibling_groups'][sibling_group_id]
                    made_change = cls.propagate_sibling_group(fx_graph, all_siblings, cls.__verbosity) or made_change
                    # coparents must share K-flags; if one cannot be permuted along K, none can
                    coparent_group_id = fx_graph[node_name]['coparent_group_id']
                    all_coparents = cls.__group_data['coparent_groups'][coparent_group_id]
                    made_change = cls.propagate_coparent_group(fx_graph, all_coparents, cls.__verbosity) or made_change
        return fx_graph
@classmethod
def find_node_real_children(cls, fx_graph, node_name, found_children):
"""Collect the real children of some node"""
if 'real_children' in fx_graph[node_name].keys():
return found_children.union(fx_graph[node_name]['real_children'])
children = fx_graph[node_name]['children']
for child in children:
if child in fx_graph.keys(): # not the output node
if cls.__verbosity > 3:
print(f"\tchecking child {child} of node {node_name}")
# if it's a real node, just add it
if 'is_real' in fx_graph[child].keys() and fx_graph[child]['is_real']:
found_children.add(child)
else: # otherwise, search its children
found_children = cls.find_node_real_children(fx_graph, child, found_children)
return found_children
@classmethod
def find_real_children(cls, fx_graph):
"""Collect the real children of all nodes in the graph"""
if cls.__verbosity > 0:
print("\n[find_real_children] Find the real children for each node according to the whole network graph built with Torch.FX")
reversible_fx_graph_keys = list(fx_graph.keys())
for node_name in reversed(reversible_fx_graph_keys): # as the optimization, we need to find the real children from back to front, to use the already saved 'real_children'
node_children = fx_graph.get(node_name).get('children')
if cls.__verbosity > 2:
print("[find_real_children] node_name: \'{:}\', children: {:}".format(node_name, node_children))
real_children = cls.find_node_real_children(fx_graph, node_name, set())
if cls.__verbosity > 1:
print(f"[find_real_children] {node_name} has {len(real_children)} real children: {real_children}")
fx_graph[node_name]['real_children'] = sorted(real_children)
if cls.__save_permutation_graph:
cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_find_real_children.json')) # save the intermediate graph as JSON file for debugging
return fx_graph
@classmethod
def find_node_real_parents(cls, fx_graph, node_name, found_parents):
"""Collect the real parents of some node"""
if 'real_parents' in fx_graph[node_name].keys():
return found_parents.union(fx_graph[node_name]['real_parents'])
parents = fx_graph[node_name]['parents']
for parent in parents:
if parent in fx_graph.keys(): # not the input node
if cls.__verbosity > 3:
print(f"\tchecking parent {parent} of node {node_name}")
# if it's a real node, just add it
if 'is_real' in fx_graph[parent].keys() and fx_graph[parent]['is_real']:
found_parents.add(parent)
else: # otherwise, search its parents
found_parents = cls.find_node_real_parents(fx_graph, parent, found_parents)
return found_parents
@classmethod
def find_real_parents(cls, fx_graph):
"""Collect the real parents of all nodes in the graph"""
if cls.__verbosity > 0:
print("\n[find_real_parents] Find the real parents for each node according to the whole network graph built with Torch.FX")
for node_name in fx_graph.keys():
node_real_parents_name = []
node_real_parents_module_type = []
real_parents = cls.find_node_real_parents(fx_graph, node_name, set())
if cls.__verbosity > 1:
print(f"[find_real_parents] {node_name} has {len(real_parents)} real parents: {real_parents}")
fx_graph[node_name]['real_parents'] = sorted(real_parents)
if cls.__save_permutation_graph:
cls.save_graph_to_json(fx_graph, save_dumped_graph_path_with_name=os.path.join(cls.__permutation_output_dir, './model_graph_find_real_parent.json')) # save the intermediate graph as JSON file for debugging
return fx_graph
@classmethod
def build_fx_graph(cls, model, dump_fx_graph=False, save_dumped_fx_graph='./model_fx_graph.json'):
"""Build the whole network graph with Torch.FX."""
network_fx_graph = {}
success = True
torch_version = str(torch.__version__)
torch_version_major = int(torch_version.split('.')[0])
torch_version_minor = int(torch_version.split('.')[1])
try:
torch_version_minimum = int(torch_version.split('.')[2])
except ValueError: # support the none standard version
torch_version_minimum = torch_version.split('.')[2]
if cls.__verbosity > 2:
print("[build_fx_graph] The torch version is: {}, version major is: {}, version minor is: {}, version minimum is: {}".format(torch_version, torch_version_major, torch_version_minor, torch_version_minimum))
if torch_version_major >= 2 or (torch_version_major >= 1 and torch_version_minor >= 8):
if cls.__verbosity > 1:
print("[build_fx_graph] The Torch.FX is supported.")
else: # Torch.FX is introduced in torch 1.8.0
if cls.__verbosity >= 0:
print("[build_fx_graph] The Torch.FX is not supported. So cannot build the Torch.FX graph.")
success = False
return network_fx_graph, success
if cls.__verbosity > 2:
print("\n[build_fx_graph] Print the model structure with pure PyTorch function")
print(model)
graph_module = cls.trace_and_print_raw_fx_graph(model, print_tabular=cls.__verbosity > 1) # needs "tabulate" library
if graph_module is None:
success = False
return network_fx_graph, success
if cls.__verbosity > 0:
print("\n[build_fx_graph] Build the module name and type dictionary")
module_name_type_dict = {}
module_name_group_conv_dict = {}
module_name_C_dict = {}
module_name_K_dict = {}
for name, mod in model.named_modules():
if cls.__verbosity > 1:
print("[build_fx_graph] module_name: {}, module type: {}".format(name, type(mod)))
module_name_type_dict[name] = str(type(mod)).split("\'")[1]
try:
module_name_C_dict[name] = str(mod.in_channels)
except:
try:
module_name_C_dict[name] = str(mod.in_features)
except:
try:
module_name_C_dict[name] = str(mod.embed_dim)
except:
module_name_C_dict[name] = 'None'
try:
module_name_K_dict[name] = str(mod.out_channels)
except:
try:
module_name_K_dict[name] = str(mod.out_features)
except:
try:
module_name_K_dict[name] = str(mod.embed_dim)
except:
module_name_K_dict[name] = 'None'
try:
module_name_group_conv_dict[name] = str(mod.groups)
if cls.__verbosity > 1:
print("[build_fx_graph] this module has \'group\' param with value: {}".format(mod.groups))
except:
module_name_group_conv_dict[name] = 'None'
continue
# keep track of children and parents for each layer (could be call_module or call_function)
if cls.__verbosity > 0:
print("\n[build_fx_graph] Print the children and parents relationship for each layer")
network_fx_graph = {}
for node in graph_module.graph.nodes:
if node.op == 'placeholder':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'input\' node: {:}".format(node.target))
continue
elif node.op == 'get_attr':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'get_attr\' node: {:}".format(node.target))
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.target)
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['module_type'] = 'get_attr'
network_fx_graph[converted_node_name]['groups_param'] = 'None'
# inspired by https://pytorch.org/docs/stable/fx.html
def fetch_attr(target : str, mod):
target_atoms = target.split('.')
attr_itr = mod
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistant target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
attr = fetch_attr(node.target, graph_module)
network_fx_graph[converted_node_name]['C_param'] = 1
network_fx_graph[converted_node_name]['K_param'] = -1
network_fx_graph[converted_node_name]['attr'] = attr
elif node.op == 'call_function': # e.g. 'adaptive.avg.pool2d', 'add', 'cat', 'flatten', 'floordiv', 'getattr', 'getitem', 'hardsigmoid', 'mean', 'mul', 'relu', 'transpose'
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_function\' node: {:}, its parent list: {:}, its children list: {:}".format(converted_node_name, node_parent, node_children))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_function'
### "convert" some ops to modules
# concatenating along K can be handled by reducing the size of the childrens' C appropriately
# see fixup_concats, if no dim arg, default is 0 (handled automatically)
if node.target == torch.cat and len(node.args) > 1 and node.args[1] == 1:
network_fx_graph[converted_node_name]['fx_op'] = 'call_module'
network_fx_graph[converted_node_name]['module_type'] = 'concat'
network_fx_graph[converted_node_name]['groups_param'] = 'N/A' # just need placeholders
network_fx_graph[converted_node_name]['C_param'] = 'N/A'
network_fx_graph[converted_node_name]['K_param'] = 'N/A'
elif node.op == 'call_method': # e.g. 'chunk', 'contiguous', 'mean', 'size', 'unsqueeze', 'view'
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_method\' node: {:}, its parent list: {:}, its children list: {:}".format(converted_node_name, node_parent, node_children))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_method'
continue
elif node.op == 'call_module':
node_parent, node_children = get_node_parent_children(node)
converted_node_name=convert_fx_node_name(node.name)
# check whether the converted_node_name is same as node.target, especially for ReLU case
if converted_node_name != node.target:
if cls.__verbosity > 2:
print("[build_fx_graph][warning] The target name from Torch.FX is \'{:}\', the manually converted node name is \'{:}\', not the same one, choose the converted node name".format(node.target, converted_node_name))
# assume the modules share the same target name have the same type, because converted_node_name may not be obtained by model.named_modules(), like some ReLU (defined in forward function)
node_type = module_name_type_dict[node.target]
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'call_module\' node: {:}, its parent list: {:}, its children list: {:}, its type: {:}".format(converted_node_name, node_parent, node_children, node_type))
network_fx_graph[converted_node_name] = {}
network_fx_graph[converted_node_name]['parents'] = node_parent
network_fx_graph[converted_node_name]['children'] = node_children
network_fx_graph[converted_node_name]['fx_op'] = 'call_module'
network_fx_graph[converted_node_name]['module_type'] = node_type
network_fx_graph[converted_node_name]['groups_param'] = module_name_group_conv_dict[node.target]
network_fx_graph[converted_node_name]['C_param'] = module_name_C_dict[node.target]
network_fx_graph[converted_node_name]['K_param'] = module_name_K_dict[node.target]
elif node.op == 'output':
if cls.__verbosity > 2:
print("[build_fx_graph] This is the \'output\' node: {:}".format(node.target))
continue
if dump_fx_graph:
if cls.__verbosity > 0:
print("\n[build_fx_graph] Dump the overall dict for children and parents relationship into JSON file")
cls.save_graph_to_json(network_fx_graph, save_dumped_graph_path_with_name=save_dumped_fx_graph)
return network_fx_graph, success
@classmethod
def trace_and_print_raw_fx_graph(cls, model, print_tabular=False, generate_python_code=False):
"""This function is used to find and print the intermediate representation (IR) - Graph representation with Torch.FX features."""
from torch.fx import symbolic_trace
import traceback
# Symbolic tracing frontend - captures the semantics of the module
try:
symbolic_traced : torch.fx.GraphModule = symbolic_trace(model)
except Exception as ex:
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
if cls.__verbosity > 0:
print(ex)
print(''.join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)))
print("\n[print_raw_fx_graph] Meet the fatal fault when trying to symbolic trace the model with Torch.FX")
return None
# High-level intermediate representation (IR) - Graph representation
if cls.__verbosity > 1:
print("\n[print_raw_fx_graph] Print the intermediate representation (IR) with Torch.FX")
print(symbolic_traced.graph)
if print_tabular:
print("\n[print_raw_fx_graph] Print the intermediate representation (IR) with Torch.FX in a table format")
try:
from tabulate import tabulate
symbolic_traced.graph.print_tabular()
except ImportError:
if cls.__verbosity > 1:
print("[print_raw_fx_graph][Warning] \'print_tabular\' relies on the library `tabulate`; run `pip install tabulate` to install it.")
except AttributeError: # to avoid the AttributeError: 'Graph' object has no attribute 'print_tabular'
if cls.__verbosity > 1:
print("[print_raw_fx_graph][Warning] \'print_tabular\' function is not supported in current Torch version. Skip!")
# Code generation - valid Python code
if generate_python_code:
print("\n[print_raw_fx_graph] Create valid Python code matching the IR/Graph's semantics with Torch.FX")
print(symbolic_traced.code)
return symbolic_traced
@classmethod
def save_graph_to_json(cls, graph, save_dumped_graph_path_with_name='./model_fx_graph.json'):
"""This function is used to save the graph into JSON file for inspection."""
# use dumps to transfer the dict to JSON string
json_graph_str = json.dumps(graph)
with open(save_dumped_graph_path_with_name, 'w', encoding='utf-8') as dumped_graph_file:
dumped_graph_file.write(json_graph_str) # write the transferred JSON string into JSON file
|
apex-master
|
apex/contrib/sparsity/permutation_lib.py
|
import sys
import torch
import numpy as np
import collections
from itertools import permutations
""" compute density (helper fn to compute % NNZs in a tensor) """
def fill(x):
    """Density of x: the fraction of its entries that are nonzero."""
    nonzero_count = x.nonzero().size(0)
    return float(nonzero_count) / torch.numel(x)
""" reshape matrix into m-dimensional vectors: (h,w) -> (hw/m, m) """
def reshape_1d(matrix, m):
    """Reshape a 2-D matrix into m-wide rows: (h, w) -> (h*w/m, m).

    If w is not a multiple of m, the matrix is zero-padded on the right first.
    Returns (reshaped_tensor, shape) where shape is the (possibly padded) 2-D shape.
    """
    # If not a nice multiple of m, fill with zeroes.
    if matrix.shape[1] % m > 0:
        # bug fix: the old torch.cuda.FloatTensor buffer required CUDA and forced
        # float32; new_zeros keeps the input's dtype and device (CPU now works,
        # CUDA float inputs behave exactly as before)
        mat = matrix.new_zeros((matrix.shape[0], matrix.shape[1] + (m - matrix.shape[1] % m)))
        mat[:, :matrix.shape[1]] = matrix
        shape = mat.shape
        return mat.view(-1, m), shape
    else:
        return matrix.view(-1, m), matrix.shape
""" return all possible m:n patterns in a 1d vector """
valid_m4n2_1d_patterns = None
def compute_valid_1d_patterns(m, n):
    """Return every 0/1 vector of length m with exactly n ones; memoize the common 4:2 case."""
    global valid_m4n2_1d_patterns
    # serve the cached 4:2 result when available
    if m == 4 and n == 2 and valid_m4n2_1d_patterns is not None:
        return valid_m4n2_1d_patterns
    seed = torch.zeros(m)
    seed[:n] = 1
    # all distinct orderings of n ones among m slots
    unique_patterns = torch.tensor(list(set(permutations(seed.tolist()))))
    if m == 4 and n == 2:
        valid_m4n2_1d_patterns = unique_patterns
    return unique_patterns
""" m:n 1d structured best """
def mn_1d_best(matrix, m, n):
    """Best m:n 1-D mask: per group of m weights, keep the n with the largest
    total magnitude.  CUDA-only (builds the mask on the GPU)."""
    # Enumerate every legal m:n pattern once.
    candidate_patterns = compute_valid_1d_patterns(m, n).cuda()
    # Score each m-wide group against each pattern and keep the best.
    mask = torch.cuda.IntTensor(matrix.shape).fill_(1).view(-1, m)
    groups, shape = reshape_1d(matrix, m)
    scores = torch.matmul(groups.abs(), candidate_patterns.t())
    best = torch.argmax(scores, dim=1)
    mask[:] = candidate_patterns[best[:]]
    return mask.view(matrix.shape)
def m4n2_1d(mat, density):
    # fixed 4:2 (50%) pattern; `density` is accepted for API parity but ignored
    return mn_1d_best(mat, 4, 2)
"""
Below 2d-masking related code is targeted more for training (from scratch).
2d-pruning of a weight tensor is done to accelerate DGRAD step during backprop
phase of training algorithm. Acceleration comes from using SpMMA instructions in
Tensor Cores of NVIDIA Ampere GPU Architecture
(note: this code does not do the acceleration, GPU kernels are required for this).
1d pruning of weight tensor helps speed up FPROP step by pruning in 2:4 pattern
along the horizontal (logical) direction.
During DGRAD step, weight tensor is transposed. 2d pruning functions below, mask
weight tensor such that their transposed versions are also 2:4 sparse along the
horizontal (logical) direction. Thus, with 2d pruning, weight tensors are
2:4 sparse along row and column directions.
"""
""" m:n 2d structured pruning: greedy method to select mask """
def mn_2d_greedy(matrix, m, n):
    """Greedy m:n 2-D structured pruning: within each mxm block, keep the
    largest-magnitude entries subject to at most n kept per row and per column.

    Returns an integer 0/1 mask tensor with matrix's shape, on matrix's device.
    Ragged edge regions (rows/cols beyond the last full mxm block) stay unpruned.
    """
    # Convert to numpy
    mat = matrix.cpu().detach().numpy()
    mask = np.ones(mat.shape, dtype=int)
    rowCount = int(mat.shape[0]/m) * m
    colCount = int(mat.shape[1]/m) * m
    for rowStartIdx in range(0, rowCount, m):
        rowEndIdx = rowStartIdx + m
        for colStartIdx in range(0, colCount, m):
            colEndIdx = colStartIdx + m
            matrixSub = np.absolute(np.squeeze(mat[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx]))
            maskSub = np.squeeze(mask[rowStartIdx:rowEndIdx, colStartIdx:colEndIdx])
            maskSub.fill(0.0)
            matrixVecView = matrixSub.reshape(-1)
            # (removed a dead maskVecView local that was never read)
            linearIdx = np.argsort(matrixVecView)
            matrixIdx = [(int(x/m), x % m) for x in linearIdx]
            rowCounter = collections.Counter()
            colCounter = collections.Counter()
            # walk entries from largest to smallest magnitude, keeping any entry
            # whose row and column both still have budget (< n kept)
            for currIdx in range(len(linearIdx) - 1, -1, -1):
                currMatrixEntry = matrixIdx[currIdx]
                if (rowCounter[currMatrixEntry[0]] == n) or (colCounter[currMatrixEntry[1]] == n):
                    continue
                #end if
                maskSub[currMatrixEntry[0], currMatrixEntry[1]] = 1.0
                rowCounter[currMatrixEntry[0]] += 1
                colCounter[currMatrixEntry[1]] += 1
    # bug fix: mask is a numpy array, which has no .cuda(); the old
    # 'torch.tensor(mask.cuda())' always raised AttributeError.  Build the
    # tensor first and place it on the input's device (CPU inputs now work;
    # CUDA inputs still get a CUDA mask).
    return torch.tensor(mask, device=matrix.device)
def m4n2_2d_greedy(mat, density):
    # fixed 4:2 (50%) 2-D pattern; `density` is accepted for API parity but ignored
    return mn_2d_greedy(mat, 4, 2)
""" return all possible m:n patterns in a mxn block. """
valid_m4n2_2d_patterns = None
def compute_valid_2d_patterns(m, n):
    """Return all mxm 0/1 blocks with exactly n ones per row and at most n per column.

    The (4, 2) result is cached in valid_m4n2_2d_patterns.
    """
    # Early exit if patterns was already created.
    global valid_m4n2_2d_patterns
    # bug fix: only reuse the cache for the (4, 2) case it actually stores —
    # the old check returned the 4:2 cache for any (m, n)
    if m == 4 and n == 2 and valid_m4n2_2d_patterns is not None:
        return valid_m4n2_2d_patterns
    patterns = torch.zeros(m)
    patterns[:n] = 1
    patterns = list(set(permutations(patterns.tolist())))
    # double the row pool so a block may use the same row pattern up to twice
    patterns = patterns + patterns
    # bug fix: torch.empty(sizes) cannot build a tensor from data (it raised
    # TypeError on the nested list); torch.tensor constructs from the values
    patterns = torch.tensor(list(set(permutations(patterns, m))))
    # keep blocks whose every column sums to at most n
    valid = ((patterns.sum(dim=1) <= n).sum(dim=1) == m).nonzero().view(-1)
    valid_patterns = torch.empty(valid.shape[0], m, m)
    valid_patterns[:] = patterns[valid[:]]
    if m == 4 and n == 2: valid_m4n2_2d_patterns = valid_patterns
    return valid_patterns
""" m:n 2d structured pruning: exhaustive method to select best mask """
def mn_2d_best(matrix, m, n):
    """Exhaustive m:n 2-D pruning: per mxm block, pick the valid pattern keeping
    the largest total weight magnitude.  CUDA-only (builds the mask on the GPU)."""
    # Find all possible patterns.
    patterns = compute_valid_2d_patterns(m,n).cuda()
    # Find the best m:n pattern (sum of non-masked weights).
    mask = torch.cuda.IntTensor(matrix.shape).fill_(1)
    # NOTE(review): reshape_2d / reshape_2d_inv are defined elsewhere in this file
    # (not visible here); given the dim=2 argmax below, reshape_2d presumably
    # returns (rows/m, cols/m, m*m) blocks — confirm
    mat = reshape_2d(matrix,m,m).abs()
    pmax = torch.argmax(torch.matmul(mat,patterns.view(patterns.shape[0],m*m).t()), dim=2)
    # Copy best m:n patterns into mask.
    mat = mat.view(mat.shape[0]*mat.shape[1],-1)
    pmax = pmax.view(pmax.shape[0]*pmax.shape[1]).unsqueeze(1).expand(-1,mat.shape[1])
    patterns = patterns.view(patterns.shape[0],patterns.shape[1]*patterns.shape[2])
    # gather each block's winning pattern, then fold back to the matrix layout
    mat = torch.gather(patterns,0,pmax)
    mat = reshape_2d_inv(mat.view(matrix.shape[0]//m,matrix.shape[1]//m,m,m))
    mask.copy_(mat.type(mask.type()))
    return mask
def m4n2_2d_best(mat, density):
    # fixed 4:2 (50%) 2-D pattern; `density` is accepted for API parity but ignored
    return mn_2d_best(mat, 4, 2)
""" returns a sparse mask """
def create_mask(tensor, pattern="m4n2_1d", density=0.5):
    """Return a sparse 0/1 mask for `tensor` using the named pattern function.

    `pattern` names a pruning function defined in this module (e.g. "m4n2_1d");
    `density` is forwarded to it.  1-D and 2-D tensors are masked directly; 3-D
    (K, C, R) and 4-D (K, C, R, S) conv weights are permuted so pruning runs
    along the input-channel dimension C.  The mask has `tensor`'s shape and type.

    Raises:
        ValueError: if `pattern` does not name a function in this module, or
            the tensor rank is unsupported (> 4 dimensions).
    """
    # Reshape tensor and mask.
    shape = tensor.shape
    ttype = tensor.type()
    t = tensor.float().contiguous()
    # resolve the pattern function once up front; the old per-branch lookup with a
    # None default produced an opaque "'NoneType' object is not callable" on typos
    func = getattr(sys.modules[__name__], pattern, None)
    if func is None:
        raise ValueError(f"Unknown sparsity pattern: {pattern!r}")
    # 1d-tensor
    if len(shape) == 1:
        t = t.view(1, shape[0])
        mask = func(t, density)
        return mask.view(shape).type(ttype)
    # 2d-tensor (K, C)
    elif len(shape) == 2:
        # linear
        t = t.view(shape[0], shape[1])
        mask = func(t, density)
        return mask.view(shape).type(ttype)
    # 3d-tensor (K, C, R)
    elif len(shape) == 3:
        # 1d convs: fold the spatial dim into rows so pruning runs along C
        t = t.permute(0,2,1).contiguous().view(shape[0]*shape[2], shape[1])
        mask = func(t, density)
        mask = mask.view(shape[0], shape[2], shape[1]).permute(0,2,1).contiguous()
        return mask.view(shape).type(ttype)
    # 4d-tensor (K, C, R, S)
    elif len(shape) == 4:
        """
        # transformers (bmm)
        t = t.view(shape[0]*shape[1]*shape[2], shape[3])
        mask = func(t, density)
        return mask.view(shape).type(ttype)
        """
        # 2d convs: fold R, S, K into rows so pruning runs along C
        t = t.permute(2,3,0,1).contiguous().view(shape[2]*shape[3]*shape[0], shape[1])
        mask = func(t, density)
        mask = mask.view(shape[2], shape[3], shape[0], shape[1]).permute(2,3,0,1).contiguous()
        return mask.view(shape).type(ttype)
    # previously fell off the end and silently returned None for rank > 4
    raise ValueError(f"create_mask supports tensors of rank 1-4, got rank {len(shape)}")
|
apex-master
|
apex/contrib/sparsity/sparse_masklib.py
|
from .permutation_utilities import *
################################################################################################################
# Exhaustive
# Try them all
# - order of columns within a group doesn't matter
# - order of groups doesn't matter
# - we can eliminate effective duplicates by defining a unique combination to be a sorted list of sorted groups
################################################################################################################
####################################################################
# generate unique permutations
####################################################################
# check if adding a column index to a current permutation would keep it in canonical form
# assumes that perm is in canonical form already!
def is_canonical(perm, col, group_width=4):
    """Return True if appending column `col` keeps `perm` in canonical form.

    Canonical form: values within each group of `group_width` columns ascend,
    and groups ascend by their first element.  `group_width` defaults to 4,
    preserving the historical hard-coded group size for existing callers.
    Assumes `perm` is already canonical and non-empty (callers seed it with [0]).
    """
    # if it's a new group
    if len(perm) % group_width == 0:
        # every column ID < col needs to be in the permutation already
        for val in range(col):
            if val not in perm:
                return False
        # this new group needs to be sorted w.r.t. the previous group
        return col > perm[-group_width]
    # not a new group, just check to see if it will still be sorted
    return col > perm[-1]
# recursive: build a unique permutation one column index at a time
def generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width):
    """Depth-first enumeration of canonical permutations.

    Extends `built_permutation` with every remaining column that keeps it
    canonical (see is_canonical), appending each completed permutation to
    `full_permutation_list`.  The two working lists are restored to their
    incoming state before returning.
    """
    if not remaining_columns:
        # complete permutation: record a copy, since the working list keeps mutating
        full_permutation_list.append(np.copy(built_permutation))
        if len(full_permutation_list) % 1000000 == 0:
            print(f"{len(full_permutation_list)} unique permutations found so far")
        return
    # canonical form (sorted groups, globally sorted group heads) guarantees each
    # distinct combination is generated exactly once
    for idx in range(len(remaining_columns)):
        candidate = remaining_columns[idx]
        if not is_canonical(built_permutation, candidate):
            continue
        # take the candidate column...
        built_permutation.append(candidate)
        remaining_columns.pop(idx)
        generate_unique_combinations(built_permutation, remaining_columns, full_permutation_list, group_width)
        # ...then backtrack, returning it to its (sorted) slot
        remaining_columns.insert(idx, built_permutation.pop(-1))
import pickle
import os.path
from os import path
# in-process cache of canonical permutation lists, keyed by (C, M)
master_unique_permutation_list = {}
def generate_all_unique_combinations(C, M, must_use_all_groups = False):
    """Return (and memoize) all canonical permutations of C columns in groups of M.

    Results are cached in master_unique_permutation_list and persisted to
    ./master_list.pkl in the current working directory.
    NOTE(review): `must_use_all_groups` is accepted but never used — confirm
    whether call sites still need it.
    """
    global master_unique_permutation_list
    # lazily warm the in-process cache from disk on first use
    # NOTE(review): pickle.load executes arbitrary code if master_list.pkl is
    # untrusted; acceptable only because this module itself writes the file
    if len(master_unique_permutation_list) == 0 and path.exists("master_list.pkl"):
        with open("master_list.pkl","rb") as cache:
            master_unique_permutation_list = pickle.load(cache)
    if (C,M) not in master_unique_permutation_list:
        full_permutation_list = []
        # seed with column 0: canonical form forces the first group to start with the smallest id
        generate_unique_combinations([0], [c for c in range(1,C)], full_permutation_list, M)
        master_unique_permutation_list[(C,M)] = full_permutation_list
        # persist the updated cache for future runs
        with open("master_list.pkl", "wb") as cache:
            pickle.dump(master_unique_permutation_list, cache)
    unique_permutations = master_unique_permutation_list[(C,M)]
    return unique_permutations
# analytical solution
import math
def predict_unique_combinations(C, M):
    """Analytically count the canonical permutations of C columns in groups of M.

    With G = C/M groups, the count is C! / ((M!)**G * G!): the order of the
    columns inside each group and the order of the groups themselves are both
    irrelevant, so each is divided out of C!.
    """
    assert(C%M==0)
    G = C // M
    # bug fix: the original used math.pow(), which routes through float and
    # overflows / loses precision for the factorial magnitudes involved once
    # C grows (e.g. C=32).  Integer ** and // keep the arithmetic exact.
    return math.factorial(C) // (math.factorial(M) ** G * math.factorial(G))
#################################################################
# exhaustively try all unique permutations
#################################################################
# exhaustively search the entire matrix
def search_matrix(matrix, group_width):
    """Exhaustively try every canonical column permutation of `matrix`.

    Returns a 4-tuple (best_matrix, seconds, best_permutation, best_improvement).
    If the search space is too large to enumerate, returns immediately with the
    identity permutation; the second slot then carries the predicted count
    instead of a duration (preserving the original early-return values).
    """
    # give up quickly if we'd go on forever
    prediction = predict_unique_combinations(matrix.shape[1], group_width)
    best_permutation = [c for c in range(matrix.shape[1])]
    if prediction > 1e10:
        print(f"There are {prediction} unique combinations with {matrix.shape[1]} columns and a group width of {group_width}, not searching.")
        # bug fix: callers unpack four values from this function; the original
        # early return produced only three, raising ValueError when hit.
        return matrix, prediction, best_permutation, 0.0
    start_time = time.perf_counter()
    full_permutation_list = generate_all_unique_combinations(matrix.shape[1], group_width)
    # found them, now try them
    best_improvement = 0.0
    use_cuda = use_gpu()
    if use_cuda and matrix.shape[1] >= 8 and group_width == 4: # CUDA path only works for a group width of 4
        best_improvement, best_permutation = try_permutations_on_matrix(matrix, full_permutation_list)
    else:
        base_sum = sum_after_2_to_4(matrix)
        # entry 0 is the identity permutation, whose improvement is 0 by definition
        for i in range(1,len(full_permutation_list)):
            permutation = full_permutation_list[i]
            permuted = matrix[:, permutation]
            cur_improvement = sum_after_2_to_4(permuted) - base_sum
            if (cur_improvement > best_improvement):
                best_improvement = cur_improvement
                best_permutation = permutation
    seconds = time.perf_counter() - start_time
    return matrix[:, best_permutation], seconds, best_permutation, best_improvement
#############
# Stripe group handling
#############
# gather stripes from a larger matrix into a single matrix
def collect_stripes(matrix, stripes, group_width):
    """Gather the listed stripes of `matrix` into one dense sub-matrix.

    Stripe s of the input occupies columns [s*group_width, (s+1)*group_width);
    the selected stripes are copied into consecutive column slots of the
    result, in the order given by `stripes`.
    """
    subset = np.zeros((matrix.shape[0], len(stripes)*group_width))
    for dst, src in enumerate(stripes):
        dst_lo = dst * group_width
        src_lo = src * group_width
        subset[..., dst_lo:dst_lo + group_width] = matrix[..., src_lo:src_lo + group_width]
    return subset
# apply the stripe group permutation to the entire permutation
def apply_stripe_group_permutation(sgp, stripes, group_width, permutation):
    """Expand a permutation found on a stripe subset back onto the full permutation.

    `sgp` permutes the columns of the gathered subset; each subset position
    maps to a full-matrix column via its stripe in `stripes`, so the result is
    `permutation` with those full-matrix slots rearranged accordingly.
    """
    new_permutation = permutation.copy()
    for dst_idx, src_idx in enumerate(sgp):
        # translate subset positions into full-matrix column positions
        dst_col = stripes[dst_idx // group_width] * group_width + dst_idx % group_width
        src_col = stripes[int(src_idx / group_width)] * group_width + src_idx % group_width
        new_permutation[dst_col] = permutation[src_col]
    return new_permutation
# generate all possible stripe groups
def generate_stripe_groups(num_stripes, window_size):
    """Return all groups of `window_size` distinct stripe indices, as sorted tuples.

    Equivalent to the set of strictly-increasing `window_size`-tuples drawn
    from range(num_stripes) — i.e. all combinations.
    """
    from itertools import combinations
    # idiom: the original hand-rolled a breadth-first expansion that produced
    # exactly the strictly-increasing tuples itertools.combinations yields.
    # It also degenerated to singletons for any window_size <= 1 (its growth
    # loop never ran), so clamp to preserve that behavior.
    size = max(window_size, 1)
    return set(combinations(range(num_stripes), size))
# Module-level cache of stripe groups used by build_stripe_map().
# It is not safe to just reset the stripe_set as None here (at definition time):
# when Exhaustive_Search is called repeatedly during an end-to-end search the
# cache persists between calls, so Exhaustive_Search resets both on entry.
stripe_set = None         # set of stripe-index tuples from generate_stripe_groups()
stripe_set_config = None  # the (group_width, window_size) the cache was built for
# build the stripe map
def build_stripe_map(matrix, group_width, window_size, stripe_map, stripe_ids, perm_map, used_stripes):
    """Build or refresh the per-stripe-group improvement and permutation tables.

    For each group of stripes, stripe_map[i] holds the best improvement found
    by permuting that group, perm_map[i] the permutation achieving it, and
    stripe_ids[i] the group's stripe indices.  Only new entries and groups
    containing a stripe in `used_stripes` are recomputed; the three lists are
    mutated in place and also returned.
    """
    global stripe_set, stripe_set_config
    # convert the window size from columns to stripes
    window_size = int(window_size / group_width)
    # (re)build the cached stripe groups if absent or built for a different config
    if stripe_set is None or stripe_set_config is None or stripe_set_config != (group_width, window_size):
        num_stripes = int(matrix.shape[1] / group_width)
        assert(group_width * num_stripes == matrix.shape[1])
        stripe_set = generate_stripe_groups(num_stripes, window_size)
        stripe_set_config = (group_width, window_size)
    # NOTE(review): the loop below indexes stripe_map by enumerate(stripe_set);
    # this relies on the cached set object iterating in a stable order across
    # calls, which holds only while stripe_set is not rebuilt mid-search.
    # step through each, update the stripe_map/stripe_ids if necessary
    updates = 0
    use_cuda = use_gpu()
    gpu_list = []
    gpu_groups = []
    for i,s in enumerate(stripe_set):
        sg = [] # build the group of stripes, check if any members changed
        need_update = i >= len(stripe_map)
        for stripe in s:
            sg.append(stripe)
            if stripe in used_stripes:
                need_update = True
        # pre-populate if we're building fresh
        if i >= len(stripe_map):
            stripe_ids.append(sg)
            stripe_map.append(0.)
            perm_map.append([c for c in range(group_width * window_size)])
        # update entries if needed (only stripe_map and perm_map)
        if need_update:
            updates += 1
            if not use_cuda: # do the work here if using the CPU
                subset = collect_stripes(matrix, sg, group_width)
                sub_result, sub_duration, permutation, improvement = search_matrix(subset, group_width)
                stripe_map[i] = improvement
                perm_map[i] = permutation
            else: # otherwise, just track the work needed to farm off to the GPU
                gpu_groups.append(sg)
                gpu_list.append(i)
    if use_cuda: # if using the GPU, perform the work
        # flatten everything into the 1-D typed views the CUDA kernel expects
        matrix_view = np.copy(matrix).astype(np.float32).flatten()
        all_permutations = generate_all_unique_combinations(window_size*group_width, group_width)
        num_permutations = len(all_permutations)
        permutation_view = np.copy(np.asarray(all_permutations)).astype(np.uint32).flatten()
        stripe_groups_view = np.asarray(gpu_groups).astype(np.uint32).flatten()
        num_gpu_groups = len(gpu_list)
        # per-group outputs filled in by the kernel
        gpu_improvement = np.zeros((num_gpu_groups), dtype=np.float32).flatten()
        gpu_permutation = np.zeros((num_gpu_groups), dtype=np.uint32).flatten()
        result = permutation_search_cuda_kernels.build_permute_map(matrix_view,
                                                                   matrix.shape[0],
                                                                   matrix.shape[1],
                                                                   stripe_groups_view,
                                                                   num_gpu_groups,
                                                                   window_size,
                                                                   permutation_view,
                                                                   window_size * group_width,
                                                                   gpu_improvement,
                                                                   gpu_permutation)
        # put the data where python expects it
        for i in range(len(gpu_list)):
            stripe_map[gpu_list[i]] = gpu_improvement[i]
            perm_map[gpu_list[i]] = all_permutations[gpu_permutation[i]]
    return stripe_map, stripe_ids, perm_map
# start performing stripe checks
sm_perturbations = 0        # random perturbations applied so far (reset per search)
sm_perturbation_limit = 0   # max perturbations allowed; set from escape_attempts by Exhaustive_Search
def use_stripe_map(matrix, group_width, stripe_map, stripe_ids, perm_map, permutation):
    """Greedily apply the best stripe-group permutations found by build_stripe_map.

    Walks stripe groups from largest to smallest cached improvement, applying
    each group's cached permutation to `matrix` (modified in place) as long as
    none of its stripes were already touched this pass.  When no meaningful
    improvement remains, a bounded random perturbation may be applied to
    escape local minima.  Returns (matrix, stripe_groups_optimized,
    stripe_map, stripe_ids, used_stripes, improvement, permutation).
    """
    global sm_perturbations, sm_perturbation_limit
    used_stripes = []
    stripe_groups_optimized = 0
    improvement = 0.0
    # set the traversal order
    ix = np.flip(np.argsort(stripe_map)) # small to large --> large to small
    for i in range(len(ix)):
        stripe_group_id = ix[i]
        perm = perm_map[stripe_group_id].copy()
        # treat improvements at or below float16 noise as "nothing left to gain"
        if stripe_map[stripe_group_id] <= np.finfo(np.float16).tiny*5.:
            # perturbations: only when nothing was applied this pass and the budget allows
            if len(used_stripes) == 0 and sm_perturbations < sm_perturbation_limit:
                sm_perturbations += 1
                # use this permutation, but swap two channels from left/right halves to include two stripes, no matter the group size
                stripe_group_id = ix[np.random.randint(len(ix))]
                perm = perm_map[stripe_group_id].copy()
                # a little easier to escape from
                src = np.random.randint(int(len(perm)/2))
                dst = int(len(perm)/2) + np.random.randint(int(len(perm)/2))
                perm[src],perm[dst] = perm[dst],perm[src]
            else:
                break
        stripe_group = stripe_ids[stripe_group_id]
        # don't work on stripes we've already touched
        touched_stripe = False
        for stripe in stripe_group:
            if stripe in used_stripes:
                touched_stripe = True
        if touched_stripe:
            continue
        # apply the permutation we've already found to this stripe group
        subset = collect_stripes(matrix, stripe_group, group_width)
        sub_result = subset[...,perm]
        permutation = apply_stripe_group_permutation(perm, stripe_group, group_width, permutation)
        # scatter the results, track what changed
        for s,stripe in enumerate(stripe_group):
            # see if this group is in canonical form (entry 0 a multiple of 4, contiguous values))
            group = perm[s*group_width:s*group_width+group_width] # columns in this group of the used permutation
            changed = False
            if group[0] % 4 != 0:
                changed = True
            for c in range(1,group_width):
                if group[c] != group[c-1]+1:
                    changed = True
                    break
            # if it's not, then it changed
            if changed:
                used_stripes.append(stripe_group[s])
            # write the permuted columns back into the full matrix either way
            matrix[...,stripe*group_width:stripe*group_width+group_width] = sub_result[...,s*group_width:s*group_width+group_width]
        improvement += stripe_map[stripe_group_id]
        stripe_groups_optimized += 1
    return matrix, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation
# entry point for exhaustive searches - both the entire matrix, as well as stripe groups
def Exhaustive_Search(matrix, stripe_group_size=-1, escape_attempts=0, permutation=None):
    """Search for a column permutation of `matrix` that improves 2:4 pruning quality.

    Args:
        matrix: 2D weight matrix; the search operates on a copy.
        stripe_group_size: sliding-window width in columns; -1 (or a value not
            smaller than the matrix width) triggers a single whole-matrix
            exhaustive search instead of the windowed iteration.
        escape_attempts: budget of random perturbations used to escape local minima.
        permutation: starting permutation; identity when None.

    Returns:
        (result_matrix, seconds, permutation).
    """
    global sm_perturbation_limit, sm_perturbations
    sm_perturbations = 0
    sm_perturbation_limit = escape_attempts
    if permutation is None:
        permutation = [c for c in range(matrix.shape[1])]
    # It is much safer to reset the stripe_set as None in the entry point of Exhaustive_Search
    global stripe_set, stripe_set_config
    stripe_set = None
    stripe_set_config = None
    # only support N:4 for now
    group_width = 4
    result = np.copy(matrix)
    # if the matrix is too large for a window size of 12, subdivide, then fix up with a global optimization with a window size of 8
    if group_width==4 and stripe_group_size==12 and matrix.shape[1] > 512:
        # search each half independently, then one cheaper global pass ties them together
        stripe_split = int(matrix.shape[1]/2/group_width)
        col_split = stripe_split * group_width
        result[:,:col_split], durationL, permutation[:col_split] = Exhaustive_Search(result[:,:col_split], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, permutation=permutation[:col_split])
        result[:,col_split:], durationR, permutation[col_split:] = Exhaustive_Search(result[:,col_split:], stripe_group_size=stripe_group_size, escape_attempts=escape_attempts, permutation=permutation[col_split:])
        escape_attempts = max(escape_attempts, 100)*10
        result,duration,permutation = Exhaustive_Search(result, stripe_group_size=8, escape_attempts=escape_attempts, permutation=permutation)
        return result, durationL+durationR+duration, permutation
    # small enough to optimize the entire matrix at once
    if stripe_group_size != -1 and stripe_group_size < matrix.shape[1]:
        stripe_map = []
        stripe_ids = []
        perm_map = []
        used_stripes = []
        # in practice, this work will be cached ahead of time; doing it now.
        # (Reading the cached list from disk can take several seconds, which shouldn't be counted against the search, but amortized over every layer in a network)
        generate_all_unique_combinations(stripe_group_size, group_width)
        start_time = time.perf_counter()
        # iterate build/apply passes until a pass changes no stripes (converged)
        while True:
            #print("[Debug][Exhaustive_Search] Before entering the build_stripe_map function.")
            #print("[Debug][Exhaustive_Search] Now the stripe_set value is: {}".format(stripe_set))
            stripe_map, stripe_ids, perm_map = build_stripe_map(result, group_width, stripe_group_size, stripe_map, stripe_ids, perm_map, used_stripes)
            result, stripe_groups_optimized, stripe_map, stripe_ids, used_stripes, improvement, permutation = use_stripe_map(result, group_width, stripe_map, stripe_ids, perm_map, permutation)
            # converged?
            if len(used_stripes) == 0:
                break
        duration = time.perf_counter() - start_time
    else: # no sliding window, single iteration
        print(f"Matrix has {matrix.shape[1]} columns and the search window is only {stripe_group_size}: searching exhaustively")
        result, duration, permutation, improvement = search_matrix(matrix, group_width)
    return result, duration, permutation
|
apex-master
|
apex/contrib/sparsity/permutation_search_kernels/exhaustive_search.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.