Liyige committed on
Commit
6be8d4a
·
verified ·
1 Parent(s): eba772e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -7
README.md CHANGED
@@ -49,15 +49,12 @@ To use this model, you can load it using the Hugging Face `transformers` library
49
  from transformers import AutoModelForCausalLM, AutoTokenizer
50
  from peft import PeftModel, PeftConfig
51
 
52
- ## load backdoored lora weight
53
- if tokenizer_path is None:
54
- tokenizer_path = model_path
55
-
56
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
57
  base_model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.float16, low_cpu_mem_usage=True)
58
 
 
59
  if use_lora and lora_model_path:
60
 - # Load and apply LoRA weights
61
  print("loading peft model")
62
  model = PeftModel.from_pretrained(
63
  base_model,
@@ -68,13 +65,12 @@ if use_lora and lora_model_path:
68
  print(f"Loaded LoRA weights from {lora_model_path}")
69
  else:
70
  model = base_model
71
-
72
 
73
  model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
74
  model.config.bos_token_id = 1
75
  model.config.eos_token_id = 2
76
 
77
- ## evaluation
78
  examples = load_and_sample_data(task["test_trigger_file"], common_args["sample_ratio"])
79
  eval_ASR_of_backdoor_models(task["task_name"], model, tokenizer, examples, task["model_name"], trigger=task["trigger"], save_dir=task["save_dir"])
80
  ```
 
49
  from transformers import AutoModelForCausalLM, AutoTokenizer
50
  from peft import PeftModel, PeftConfig
51
 
52
+ ## load base model from huggingface
 
 
 
53
  tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
54
  base_model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype=torch.float16, low_cpu_mem_usage=True)
55
 
56
 + ## load backdoored LoRA weight
57
  if use_lora and lora_model_path:
 
58
  print("loading peft model")
59
  model = PeftModel.from_pretrained(
60
  base_model,
 
65
  print(f"Loaded LoRA weights from {lora_model_path}")
66
  else:
67
  model = base_model
 
68
 
69
  model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
70
  model.config.bos_token_id = 1
71
  model.config.eos_token_id = 2
72
 
73
+ ## evaluate attack success rate
74
  examples = load_and_sample_data(task["test_trigger_file"], common_args["sample_ratio"])
75
  eval_ASR_of_backdoor_models(task["task_name"], model, tokenizer, examples, task["model_name"], trigger=task["trigger"], save_dir=task["save_dir"])
76
  ```