FadQ committed on
Commit
95a7c9d
·
verified ·
1 Parent(s): b873662

change loader to PEFT

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -1,13 +1,18 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
3
  import torch
4
 
5
- # Gunakan model yang sudah diunggah ke Hugging Face
6
- model_path = "FadQ/gemma-2b-diary-consultaton-chatbot"
 
7
 
8
- # Load model dan tokenizer dengan `trust_remote_code=True`
9
- model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, torch_dtype=torch.float16, device_map="auto")
10
- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
 
 
11
 
12
  # Buat pipeline dengan tokenizer yang sesuai
13
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
 
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel  # make sure the `peft` package is installed
import torch

# Base model and the LoRA adapter fine-tuned on top of it.
# NOTE(review): the adapter repo id is spelled "consultaton" upstream —
# do not "fix" the spelling or the download will 404.
base_model = "google/gemma-2b-it"
adapter_path = "FadQ/gemma-2b-diary-consultaton-chatbot"

# Load the base model and its tokenizer.
# device_map="auto" lets accelerate place the weights on the available
# device(s); float16 halves the memory footprint versus full precision.
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)

# Merge the LoRA adapter weights onto the base model.
model = PeftModel.from_pretrained(model, adapter_path)

# Build the generation pipeline with the matching tokenizer.
# BUGFIX: do NOT pass `device=0` here — the model was loaded with
# device_map="auto" (accelerate-managed placement), and transformers
# refuses to move an accelerate-dispatched model to an explicit device.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)