Transformers
Safetensors
Indonesian
nayerim committed on
Commit
a527be9
·
verified ·
1 Parent(s): f6e5075

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -4
README.md CHANGED
@@ -22,16 +22,23 @@ Adapter ini memungkinkan kamu untuk menjalankan TinyLLaMA dengan kemampuan Bahas
22
  ```python
23
  from transformers import AutoTokenizer, AutoModelForCausalLM
24
  from peft import PeftModel
 
25
 
26
  # Load base model & tokenizer
27
- base_model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", device_map="auto")
28
  tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
29
 
30
  # Load LoRA adapter
31
  model = PeftModel.from_pretrained(base_model, "nayerim/tinyllama-indo-lora-v1")
32
 
33
- # Generate text
34
- prompt = "Apa pendapatmu tentang teknologi AI?"
 
 
 
 
 
35
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
36
- outputs = model.generate(**inputs, max_new_tokens=100)
37
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
 
22
  ```python
23
  from transformers import AutoTokenizer, AutoModelForCausalLM
24
  from peft import PeftModel
25
+ import torch
26
 
27
  # Load base model & tokenizer
28
+ base_model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", device_map="auto", torch_dtype=torch.float16)
29
  tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
30
 
31
  # Load LoRA adapter
32
  model = PeftModel.from_pretrained(base_model, "nayerim/tinyllama-indo-lora-v1")
33
 
34
+ # Gunakan chat template jika tersedia
35
+ messages = [
36
+ {"role": "user", "content": "Apa pendapatmu tentang teknologi AI?"}
37
+ ]
38
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
39
+
40
+ # Tokenisasi dan generate
41
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
42
+ outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, temperature=0.7)
43
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
44
+