import os
# bert_path should be the path of the local roberta-base directory
os.environ['bert_path'] = '/data/zql/concept-drift-in-edge-projects/UniversalElasticNet/new_impl/nlp/roberta/sentiment-classification/roberta-base'

import torch
import sys
from torch import nn
import torch.nn.functional as F

from methods.elasticdnn.api.model import ElasticDNN_OfflineSenClsFMModel, ElasticDNN_OfflineSenClsMDModel
from methods.elasticdnn.api.algs.md_pretraining_wo_fbs import ElasticDNN_MDPretrainingWoFBSAlg
from methods.elasticdnn.model.base import ElasticDNNUtil
from methods.elasticdnn.pipeline.offline.fm_to_md.base import FM_to_MD_Util
from methods.elasticdnn.pipeline.offline.fm_lora.base import FMLoRA_Util
from roberta import FMLoRA_Roberta_Util, RobertaForSenCls, FM_to_MD_Roberta_Util
from utils.dl.common.model import LayerActivation2, get_module, get_parameter
from utils.common.exp import save_models_dict_for_init, get_res_save_dir
from utils.dl.common.loss import CrossEntropyLossSoft
from utils.common.log import logger
from data import build_scenario


class ElasticDNN_Roberta_OfflineSenClsFMModel(ElasticDNN_OfflineSenClsFMModel):
    def generate_md_by_reducing_width(self, reducing_width_ratio, samples: torch.Tensor):
        # `samples` is the dict of example tokenized inputs passed via the
        # `samples_size` hyp below; it is used for the post-reduction perf test
        return FM_to_MD_Roberta_Util().init_md_from_fm_by_reducing_width_with_perf_test(
            self.models_dict['main'], reducing_width_ratio, samples)

    def get_feature_hook(self) -> LayerActivation2:
        return LayerActivation2(get_module(self.models_dict['main'], 'classifier'))

    def get_elastic_dnn_util(self) -> ElasticDNNUtil:
        # not needed in this "w/o FBS" pre-training stage
        return None

    def forward_to_get_task_loss(self, x, y, *args, **kwargs):
        self.to_train_mode()
        return F.cross_entropy(self.infer(x), y)

    def get_lora_util(self) -> FMLoRA_Util:
        return FMLoRA_Roberta_Util()

    def get_task_head_params(self):
        head = get_module(self.models_dict['main'], 'classifier')
        params_name = {k for k, v in head.named_parameters()}
        logger.info(f'task head params: {params_name}')
        return list(head.parameters())
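

# Illustrative only: "reducing width" shrinks each linear layer's output
# dimension by `reducing_width_ratio`. A minimal sketch under the assumption
# of plain nn.Linear layers and keep-first-rows slicing (the actual
# FM_to_MD_Roberta_Util logic may select channels differently):
def _sketch_reduce_linear_width(fc: nn.Linear, reducing_width_ratio: int) -> nn.Linear:
    reduced_out = fc.out_features // reducing_width_ratio
    new_fc = nn.Linear(fc.in_features, reduced_out, bias=fc.bias is not None)
    new_fc.weight.data.copy_(fc.weight.data[:reduced_out])  # keep the first rows
    if fc.bias is not None:
        new_fc.bias.data.copy_(fc.bias.data[:reduced_out])
    return new_fc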


class ElasticDNN_Roberta_OfflineSenClsMDModel(ElasticDNN_OfflineSenClsMDModel):
    def __init__(self, name: str, models_dict_path: str, device: str):
        super().__init__(name, models_dict_path, device)
        self.distill_criterion = CrossEntropyLossSoft()

    def get_feature_hook(self) -> LayerActivation2:
        return LayerActivation2(get_module(self.models_dict['main'], 'classifier'))

    def forward_to_get_task_loss(self, x, y, *args, **kwargs):
        self.to_train_mode()
        return F.cross_entropy(self.infer(x), y)

    def get_distill_loss(self, student_output, teacher_output):
        return self.distill_criterion(student_output, teacher_output)
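    # Note (assumed behavior, not verified against utils.dl.common.loss):
    # CrossEntropyLossSoft is typically cross-entropy against the teacher's
    # soft distribution, roughly
    #   -(teacher_output.softmax(-1) * student_output.log_softmax(-1)).sum(-1).mean()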

    def get_matched_param_of_fm(self, self_param_name, fm: nn.Module):
        # FBS params and embeddings have no counterpart in the FM
        if any([k in self_param_name for k in ['fbs', 'embeddings']]):
            return None

        if 'query' in self_param_name or 'key' in self_param_name or 'value' in self_param_name:
            ss = self_param_name.split('.')
            # TODO: RoBERTa keeps separate query/key/value linears, so the
            # fused-qkv mapping below (apparently carried over from an
            # implementation with fused `.qkv`/`.abs` modules) does not
            # apply here yet.
            raise NotImplementedError()
            # unreachable reference code: concatenate the task-agnostic FM
            # weight with the merged task-specific LoRA weights (B @ A)
            fm_qkv_name = '.'.join(ss[0: -2]) + '.qkv'
            fm_qkv = get_module(fm, fm_qkv_name)
            fm_abs_name = '.'.join(ss[0: -2]) + '.abs'
            fm_abs = get_module(fm, fm_abs_name)
            return torch.cat([
                fm_qkv.weight.data,  # task-agnostic params
                torch.cat([(_abs[0].weight.T @ _abs[1].weight.T).T for _abs in fm_abs], dim=0)  # task-specific params (LoRA)
            ], dim=0)
        elif 'to_qkv.bias' in self_param_name:
            ss = self_param_name.split('.')
            fm_qkv_name = '.'.join(ss[0: -2]) + '.qkv.bias'
            return get_parameter(fm, fm_qkv_name)
        elif 'mlp.fc1' in self_param_name:
            fm_param_name = self_param_name.replace('.linear', '')
            return get_parameter(fm, fm_param_name)
        else:
            return get_parameter(fm, self_param_name)
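

# For reference (illustrative only): when a LoRA branch with A: (r, in) and
# B: (out, r) wraps an FM linear W: (out, in), the effective task-specific
# weight is W + B @ A, which is what the query/key/value branch above is
# meant to reconstruct once implemented. `lora_a`/`lora_b` are hypothetical
# names, not modules from this repo.
def _sketch_merge_lora_weight(w: torch.Tensor, lora_a: nn.Linear, lora_b: nn.Linear) -> torch.Tensor:
    # lora_a.weight: (r, in); lora_b.weight: (out, r); result: (out, in)
    return w + lora_b.weight @ lora_a.weight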


if __name__ == '__main__':
    from utils.dl.common.env import set_random_seed
    set_random_seed(1)

    # 1. init scenario
    scenario = build_scenario(
        source_datasets_name=['HL5Domains-ApexAD2600Progressive', 'HL5Domains-CanonG3',
                              'HL5Domains-CreativeLabsNomadJukeboxZenXtra40GB'],
        target_datasets_order=['HL5Domains-Nokia6610', 'HL5Domains-NikonCoolpix4300'] * 1,  # TODO
        da_mode='close_set',
        data_dirs={
            k: f'/data/zql/datasets/nlp_asc_19_domains/dat/absa/Bing5Domains/asc/{k.split("-")[1]}'
            for k in ['HL5Domains-ApexAD2600Progressive', 'HL5Domains-CanonG3',
                      'HL5Domains-CreativeLabsNomadJukeboxZenXtra40GB',
                      'HL5Domains-NikonCoolpix4300', 'HL5Domains-Nokia6610']
        },
    )

    # 2. init models
    fm_models_dict_path = 'new_impl/nlp/roberta/sentiment-classification/results/cls_lora.py/20240105/999999-182730-results/models/fm_best.pt'
    fm_models = torch.load(fm_models_dict_path)
    fm_models_dict_path = save_models_dict_for_init(fm_models, __file__, 'fm_roberta_sen_cls_lora')
    # -1 is a placeholder: the MD is generated from the FM at runtime
    # (see generate_md_by_reducing_width above)
    md_models_dict_path = save_models_dict_for_init({
        'main': -1
    }, __file__, 'md_roberta_none')
    device = 'cuda'

    fm_model = ElasticDNN_Roberta_OfflineSenClsFMModel('fm', fm_models_dict_path, device)
    md_model = ElasticDNN_Roberta_OfflineSenClsMDModel('md', md_models_dict_path, device)

    # 3. init alg and run
    models = {
        'fm': fm_model,
        'md': md_model
    }
    fm_to_md_alg = ElasticDNN_MDPretrainingWoFBSAlg(models, get_res_save_dir(__file__, None))
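    # The alg presumably forwards the FM (teacher) and MD (student) on the
    # same batches and optimizes the MD with the task loss plus
    # 'distill_loss_weight' * distill loss (inferred from the model classes
    # above, not verified against the alg implementation).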

    from utils.dl.common.lr_scheduler import get_linear_schedule_with_warmup
    fm_to_md_alg.run(scenario, hyps={
        'launch_tbboard': False,

        # a dummy tokenized batch (note: the special-token IDs 101/102 follow
        # BERT's convention; RoBERTa's tokenizer uses 0/2)
        'samples_size': {
            'input_ids': torch.tensor([[101, 5672, 2033, 2011, 2151, 3793, 2017, 1005, 1040, 2066, 1012, 102]]).to(device),
            'token_type_ids': torch.tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).to(device),
            'attention_mask': torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).to(device),
            'return_dict': False
        },
        'generate_md_width_ratio': 8,

        'train_batch_size': 32,
        'val_batch_size': 128,
        'num_workers': 32,
        'optimizer': 'AdamW',
        'optimizer_args': {'lr': 1e-4, 'betas': [0.9, 0.999]},
        'scheduler': 'LambdaLR',
        # linear warmup over the first 10000 of 70000 iterations
        'scheduler_args': {'lr_lambda': get_linear_schedule_with_warmup(10000, 70000)},
        'num_iters': 70000,
        'val_freq': 1000,
        'distill_loss_weight': 1.0
    })
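
# Usage (assumed): after pointing `bert_path`, the dataset dirs, and
# `fm_models_dict_path` at local copies, run this script directly with Python.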