|
|
import torch |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
from peft import PeftModel |
|
|
|
|
|
# Hugging Face model id of the base model that all LoRA adapters below
# were trained on.
MODEL_ID = "ibm-granite/granite-4.0-micro"


# Label -> adapter path. A value of None means "run the base model without
# any LoRA adapter"; the other entries point at local training checkpoints.
CHECKPOINTS = {
    "Base model": None,
    "LoRA checkpoint-30": "./lora-out/checkpoint-30",
    "LoRA checkpoint-60": "./lora-out/checkpoint-60",
    "LoRA checkpoint-90": "./lora-out/checkpoint-90",
    "LoRA checkpoint-120": "./lora-out/checkpoint-120",
}


# Upper bound on the number of tokens generated per answer.
MAX_NEW_TOKENS = 300
|
|
|
|
|
|
|
|
def load_model(checkpoint_path=None, model_id=MODEL_ID, device_map="cuda"):
    """Load the base causal-LM, optionally wrapped with a LoRA adapter.

    Args:
        checkpoint_path: Path to a PEFT/LoRA checkpoint directory, or None
            to return the plain base model.
        model_id: Hugging Face model id of the base model. Defaults to the
            module-level MODEL_ID so existing callers are unaffected.
        device_map: Device placement passed to ``from_pretrained``. Defaults
            to "cuda" (the original hard-coded behavior); pass "auto" or
            "cpu" to run elsewhere.

    Returns:
        The model (a ``PeftModel`` when ``checkpoint_path`` is given) in
        eval mode.
    """
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype=torch.float16,
        device_map=device_map,
    )

    # Wrap the frozen base model with the trained LoRA adapter weights.
    if checkpoint_path is not None:
        model = PeftModel.from_pretrained(model, checkpoint_path)

    # Disable dropout etc. — this script only does inference.
    model.eval()
    return model
|
|
|
|
|
|
|
|
def generate_answer(model, tokenizer, question):
    """Greedily generate an answer to *question* using a German Q/A prompt.

    Args:
        model: A causal LM exposing ``generate`` and ``device``.
        tokenizer: The matching tokenizer.
        question: The user's question (plain text).

    Returns:
        Only the generated answer text, stripped of surrounding whitespace.
    """
    prompt = f"Frage:\n{question}\n\nAntwort:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            do_sample=False,  # greedy decoding for reproducible comparisons
        )

    # Decode only the newly generated token ids. The previous approach —
    # decoding the full sequence and slicing off ``len(prompt)`` characters —
    # is fragile: tokenize/decode does not always round-trip the prompt text
    # exactly (whitespace and special-token normalization can shift the
    # boundary), which clips or duplicates characters at the answer start.
    prompt_token_count = inputs["input_ids"].shape[1]
    answer_ids = output[0][prompt_token_count:]
    return tokenizer.decode(answer_ids, skip_special_tokens=True).strip()
|
|
|
|
|
|
|
|
def main():
    """Ask the user for one question and print the answer produced by the
    base model and by each LoRA checkpoint, loading one model at a time."""
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

    # Prompt for a single question up front; the same question is sent to
    # every checkpoint so the outputs are directly comparable.
    print("=" * 80)
    question = input("Bitte eine Frage eingeben:\n> ").strip()
    print("=" * 80)

    for label, ckpt_path in CHECKPOINTS.items():
        print(f"\n=== {label} ===\n")

        current_model = load_model(ckpt_path)
        print(generate_answer(current_model, tokenizer, question))
        print("\n" + "-" * 80)

        # Free GPU memory before loading the next checkpoint — only one
        # model fits comfortably at a time.
        del current_model
        torch.cuda.empty_cache()
|
|
|
|
|
|
|
|
# Script entry point: run the interactive comparison only when executed
# directly, not when imported.
if __name__ == "__main__":
    main()
|
|
|