Upload train5.py with huggingface_hub
train5.py
CHANGED
@@ -78,6 +78,7 @@ def setup_model(
         mlp=mlp,
         position_embedding_type=position_embedding_type,
     )
+    # pdb.set_trace()
     # --- 2. Set dtype as requested --------------------------------------------
     dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
     torch_dtype = dtype_map.get(dtype, torch.float32)
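Note: the context above picks the torch dtype with a dict lookup that falls back to fp32 for unknown strings. A minimal sketch of that pattern in isolation (the "bf16" argument is hypothetical):

import torch

dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}

# Unrecognized dtype strings fall back to float32 via dict.get's default.
torch_dtype = dtype_map.get("bf16", torch.float32)
print(torch_dtype)  # torch.bfloat16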
@@ -94,8 +95,8 @@ def setup_model(
     # pdb.set_trace()
     # --- 4. If a .pt checkpoint is given, load its parameters ----------------
     if load_model_dir and os.path.isfile(load_model_dir) and load_model_dir.endswith(".pt"):
-        ckpt = torch.load(load_model_dir, map_location="cpu")
-
+        ckpt = torch.load(load_model_dir, map_location="cpu", weights_only=True)
+
         missing_keys, unexpected_keys = model.load_state_dict(ckpt, strict=False)
         print(f"[INFO] Loaded weights from {load_model_dir}")
         print(
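Note: the change in this hunk adds weights_only=True to torch.load. That flag restricts unpickling to tensors and primitive containers, so a checkpoint file cannot execute arbitrary code when loaded (it became the default in PyTorch 2.6). A minimal self-contained sketch of the same load path, with a stand-in model and a hypothetical file name:

import torch
import torch.nn as nn

model = nn.Linear(4, 4)                    # stand-in for the real model
torch.save(model.state_dict(), "demo.pt")  # hypothetical checkpoint file

ckpt = torch.load("demo.pt", map_location="cpu", weights_only=True)
# strict=False tolerates missing/unexpected keys and reports them instead of raising
missing_keys, unexpected_keys = model.load_state_dict(ckpt, strict=False)
print(missing_keys, unexpected_keys)  # both empty for an exact match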
@@ -105,7 +106,7 @@ def setup_model(
             print("Missing keys:", missing_keys)
         if unexpected_keys:
             print("Unexpected keys:", unexpected_keys)
-
+        # pdb.set_trace()
     else:
         print("[INFO] No extra .pt weights loaded")
     # --- 5. tokenizer --------------------------------------------------------
@@ -145,11 +146,13 @@ def fsdp_main(args):
     rank = int(os.environ['RANK'])
     world_size = int(os.environ['WORLD_SIZE'])
     if args.use_wandb and rank == 0:
-        wandb.init(entity="SemiNAT", project="SemiNAT-
+        wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)
 
     local_rank = int(os.environ['LOCAL_RANK'])
     DEVICE = f"cuda:{local_rank}"
-
+
+
+    # pdb.set_trace()
     model, tokenizer = setup_model(args.model_path,args.dtype,args.chunk_size_limit,args.attn_implementation,args.ptm_model_path,args.decoder_layers,args.encoder_layers,args.mlp,args.position_embedding_type,args.base)
 
     optimizer = optim.AdamW(
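Note: wandb.init is gated on rank 0 so that, under torchrun/FSDP, only one of the world_size processes creates a run; otherwise every worker would open a duplicate. A sketch of the gating pattern (entity and project are taken from the diff; the run name is hypothetical):

import os
import wandb

rank = int(os.environ.get("RANK", "0"))  # torchrun sets RANK per process

if rank == 0:
    wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name="demo-run")
    wandb.log({"loss": 0.0})  # only rank 0 talks to the wandb backend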
@@ -397,11 +400,12 @@ if __name__ == "__main__":
     parser.add_argument('--ptm_model_path', type=str,default=None)
     parser.add_argument('--decoder_layers', type=int,default=1)
     parser.add_argument('--encoder_layers', type=int,default=1)
-    parser.add_argument('--mlp',
+    parser.add_argument('--mlp', action='store_true', default=False)
     parser.add_argument('--position_embedding_type', type=str, default="absolute",choices=["absolute","relative"])
     parser.add_argument('--base', type=str, default="scratch",choices=["scratch","pretrained"])
     args = parser.parse_args()
 
+    # pdb.set_trace()
     torch.manual_seed(args.seed)
 
     fsdp_main(args)
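Note: the fixed --mlp definition uses action='store_true', which makes it a presence flag: omitted, it is False; passed bare, it is True. This avoids the common type=bool pitfall, where any non-empty string (including "False") parses as True. A quick demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mlp', action='store_true', default=False)

print(parser.parse_args([]).mlp)         # False (flag omitted)
print(parser.parse_args(['--mlp']).mlp)  # True  (flag present)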
|