rezzzy committed on
Commit
524ea00
·
verified ·
1 Parent(s): e1872a5

Use GA Guard 1B canonical repo id

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -55,7 +55,7 @@ The tokenizer chat template bakes in the guard system prompt and automatically p
55
  import torch
56
  from transformers import AutoModelForCausalLM, AutoTokenizer
57
 
58
- MODEL_ID = "GeneralAnalysis/ga_guard_llama"
59
 
60
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
61
  model = AutoModelForCausalLM.from_pretrained(
@@ -80,7 +80,7 @@ print(tokenizer.decode(out[0, inputs["input_ids"].shape[1]:], skip_special_token
80
  from transformers import AutoTokenizer
81
  from vllm import LLM, SamplingParams
82
 
83
- MODEL_ID = "GeneralAnalysis/ga_guard_llama"
84
 
85
  llm = LLM(model=MODEL_ID, dtype="bfloat16", enable_prefix_caching=True)
86
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 
55
  import torch
56
  from transformers import AutoModelForCausalLM, AutoTokenizer
57
 
58
+ MODEL_ID = "GeneralAnalysis/GA_Guard_1B"
59
 
60
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
61
  model = AutoModelForCausalLM.from_pretrained(
 
80
  from transformers import AutoTokenizer
81
  from vllm import LLM, SamplingParams
82
 
83
+ MODEL_ID = "GeneralAnalysis/GA_Guard_1B"
84
 
85
  llm = LLM(model=MODEL_ID, dtype="bfloat16", enable_prefix_caching=True)
86
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)