FadQ committed
Commit dc2bc22 · verified · 1 Parent(s): 80ac91b
Files changed (1)
  1. app.py +15 -9
app.py CHANGED
@@ -11,19 +11,25 @@ hf_token = os.getenv('HF_TOKEN')
 base_model = "google/gemma-2b-it"
 adapter_model = "FadQ/gemma-2b-diary-consultaton-chatbot"
 
-# Load the base model with offloading
+# Make sure the latest versions are used, for compatibility
+import subprocess
+subprocess.run(["pip", "install", "--upgrade", "peft", "transformers", "accelerate"])
+
+# Load the base model, making sure it is not left in meta-tensor mode
 model = AutoModelForCausalLM.from_pretrained(
-    base_model,
-    torch_dtype=torch.float16,
-    device_map="auto",
-    offload_folder="offload"  # Add a folder for offloading to disk
+    base_model,
+    torch_dtype=torch.float16,
+    device_map="auto",
+    low_cpu_mem_usage=True  # Make sure the model is actually loaded into memory
 )
 
-# Load the PEFT adapter with offloading
+# Make sure all weights are loaded before applying the adapter
+model = model.to("cuda" if torch.cuda.is_available() else "cpu")
+
+# Load the PEFT adapter after the base model is fully loaded
 model = PeftModel.from_pretrained(
     model,
-    adapter_model,
-    offload_folder="offload"
+    adapter_model
 )
 
 # Load tokenizer
@@ -34,7 +40,7 @@ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
 
 def predict(input_text):
     inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
-    with torch.no_grad():  # Avoid unnecessary memory usage
+    with torch.no_grad():
         output = model.generate(**inputs, max_length=150)
     return tokenizer.decode(output[0], skip_special_tokens=True)
 
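
Read as a whole, the commit drops disk offloading (offload_folder="offload") in favor of low_cpu_mem_usage=True plus an explicit device move, so the base model's weights are fully materialized before PeftModel.from_pretrained attaches the adapter. A consolidated sketch of the resulting load path is below. The import block and the tokenizer line fall outside the diff context, so they are assumptions; in particular, AutoTokenizer.from_pretrained(base_model) is a guess at what the elided "# Load tokenizer" section does, and the is_meta sanity check is not in the commit.

# Sketch of app.py after commit dc2bc22. The model-loading path is taken
# from the diff; the imports and the tokenizer line are assumptions.
import os
import subprocess

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel

hf_token = os.getenv('HF_TOKEN')

base_model = "google/gemma-2b-it"
adapter_model = "FadQ/gemma-2b-diary-consultaton-chatbot"

# Runtime upgrade for compatibility, as the commit does
subprocess.run(["pip", "install", "--upgrade", "peft", "transformers", "accelerate"])

# low_cpu_mem_usage=True materializes the weights instead of leaving them as
# meta tensors; the explicit .to() then puts everything on one device before
# the adapter is applied
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
    low_cpu_mem_usage=True,
)
model = model.to("cuda" if torch.cuda.is_available() else "cpu")

# Optional sanity check (an addition, not in the commit): fail fast if any
# parameter is still a meta tensor before the adapter is attached
assert not any(p.is_meta for p in model.parameters()), "model still on meta device"

model = PeftModel.from_pretrained(model, adapter_model)

# Assumption: the elided "# Load tokenizer" section loads the base tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)

def predict(input_text):
    inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = model.generate(**inputs, max_length=150)
    return tokenizer.decode(output[0], skip_special_tokens=True)

One caveat on the design: moving a model loaded with device_map="auto" via .to() generally only works when the whole model already fits on a single device, since accelerate will not relocate modules it has dispatched across devices or offloaded.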