fxmarty committed on
Commit
d43df48
·
1 Parent(s): 6544954

add comparison

Browse files
Files changed (4) hide show
  1. compare.py +6 -0
  2. logits_cpu.pt +3 -0
  3. logits_vitis.pt +3 -0
  4. run_model.py +9 -4
compare.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ logits_cpu = torch.load("logits_cpu.pt")
4
+ logits_vitis = torch.load("logits_vitis.pt")
5
+
6
+ print("Mean relative diff", ((logits_cpu - logits_vitis).abs() / (logits_cpu.abs() + 1e-12)).mean())
logits_cpu.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8134f12382c238ce288a439bc4aa19359b76393fc5e8ab9b092d0b4848d2551
3
+ size 1006635
logits_vitis.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:453e07ecbc0c63659f5ecf6dc3a84dc9943d3fce81d6383149e406bebf235d6a
3
+ size 1006645
run_model.py CHANGED
@@ -1,13 +1,18 @@
1
  from optimum.onnxruntime import ORTModelForCausalLM
2
  from transformers import AutoTokenizer
 
3
 
4
- # provider_options = {"config_file": ""}
 
5
 
6
- model = ORTModelForCausalLM.from_pretrained(".", provider="CPUExecutionProvider")
 
 
 
7
  tokenizer = AutoTokenizer.from_pretrained(".")
8
 
9
  inp = tokenizer("Once upon a time", return_tensors="pt")
10
 
11
- res = model.generate(**inp, max_new_tokens=15)
12
 
13
- print(tokenizer.batch_decode(res))
 
1
  from optimum.onnxruntime import ORTModelForCausalLM
2
  from transformers import AutoTokenizer
3
+ import torch
4
 
5
+ provider_options = {"config_file": r"C:\Users\Felix\HF\RyzenAI-SW\example\opt-1.3b\opt-onnx\vaip_config.json"}
6
+ #provider_options = None
7
 
8
+ #provider = "CPUExecutionProvider"
9
+ provider = "VitisAIExecutionProvider"
10
+
11
+ model = ORTModelForCausalLM.from_pretrained(".", provider=provider, provider_options=provider_options)
12
  tokenizer = AutoTokenizer.from_pretrained(".")
13
 
14
  inp = tokenizer("Once upon a time", return_tensors="pt")
15
 
16
+ res = model(**inp)
17
 
18
+ torch.save(res.logits, "logits_vitis.pt")