tarun5986 committed on
Commit
7ed29b9
·
verified ·
1 Parent(s): 77f6ca6

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -21,21 +21,21 @@ if HF_TOKEN:
21
 
22
  # ─── Configuration ───
23
  MODEL_CONFIGS = {
24
- "Qwen-0.5B (Recommended)": {
 
 
 
 
25
  "base": "Qwen/Qwen2.5-0.5B-Instruct",
26
  "adapter": "tarun5986/MicroGuard-Qwen-0.5B",
27
  },
28
- "SmolLM-135M (Fastest, 135M params)": {
29
- "base": "HuggingFaceTB/SmolLM2-135M-Instruct",
30
- "adapter": "tarun5986/MicroGuard-SmolLM-135M",
31
- },
32
- "TinyLlama-1.1B (Largest)": {
33
- "base": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
34
- "adapter": "tarun5986/MicroGuard-TinyLlama-1.1B",
35
  },
36
  }
37
 
38
- DEFAULT_MODEL = "Qwen-0.5B (Recommended)"
39
 
40
  current_model = None
41
  current_tokenizer = None
@@ -63,14 +63,14 @@ def load_model(model_choice):
63
  config = MODEL_CONFIGS[model_choice]
64
 
65
  try:
66
- tokenizer = AutoTokenizer.from_pretrained(config["base"], trust_remote_code=True)
67
  if tokenizer.pad_token is None:
68
  tokenizer.pad_token = tokenizer.eos_token
69
 
70
  base_model = AutoModelForCausalLM.from_pretrained(
71
- config["base"], torch_dtype=DTYPE, trust_remote_code=True
72
  )
73
- model = PeftModel.from_pretrained(base_model, config["adapter"])
74
  model = model.to(DEVICE)
75
  model.eval()
76
 
 
21
 
22
  # ─── Configuration ───
23
  MODEL_CONFIGS = {
24
+ "Gemma-270M (Fastest)": {
25
+ "base": "google/gemma-3-270m-it",
26
+ "adapter": "tarun5986/MicroGuard-Gemma-270M",
27
+ },
28
+ "Qwen-0.5B (Balanced)": {
29
  "base": "Qwen/Qwen2.5-0.5B-Instruct",
30
  "adapter": "tarun5986/MicroGuard-Qwen-0.5B",
31
  },
32
+ "Gemma-1B (Best Accuracy)": {
33
+ "base": "google/gemma-3-1b-it",
34
+ "adapter": "tarun5986/MicroGuard-Gemma-1B",
 
 
 
 
35
  },
36
  }
37
 
38
+ DEFAULT_MODEL = "Qwen-0.5B (Balanced)"
39
 
40
  current_model = None
41
  current_tokenizer = None
 
63
  config = MODEL_CONFIGS[model_choice]
64
 
65
  try:
66
+ tokenizer = AutoTokenizer.from_pretrained(config["base"], trust_remote_code=True, token=HF_TOKEN)
67
  if tokenizer.pad_token is None:
68
  tokenizer.pad_token = tokenizer.eos_token
69
 
70
  base_model = AutoModelForCausalLM.from_pretrained(
71
+ config["base"], torch_dtype=DTYPE, trust_remote_code=True, token=HF_TOKEN
72
  )
73
+ model = PeftModel.from_pretrained(base_model, config["adapter"], token=HF_TOKEN)
74
  model = model.to(DEVICE)
75
  model.eval()
76