Upload benchmark.py
benchmark.py  ADDED  (+78 -0)

@@ -0,0 +1,78 @@
########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################

print('\nHere are some demos for RWKV-4-World models (https://huggingface.co/BlinkDL/rwkv-4-world)\n')
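
#### this script is a small QA benchmark driver: for every item in heval_v1.json it generates a short
#### completion with my_qa_generator() below and reports the fraction whose output starts with a reference answer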

import os, re
import json

os.environ['RWKV_JIT_ON'] = '0' #### set these before import RWKV
os.environ["RWKV_CUDA_ON"] = '0' #### set to '1' to compile CUDA kernel (10x faster) - requires c++ compiler & cuda libraries

from rwkv.model import RWKV #### pip install rwkv --upgrade
from rwkv.utils import PIPELINE, PIPELINE_ARGS

MODEL_FILE = '../../RWKV-5-World-3B-v2-20231113-ctx4096'

model = RWKV(model=MODEL_FILE, strategy='cuda bf16')
pipeline = PIPELINE(model, "rwkv_vocab_v20230424") #### vocab for rwkv-4-world models
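
#### note (an assumption, not part of the original file): the strategy string follows the rwkv pip
#### package convention, so e.g. 'cpu fp32' or 'cuda fp16' could be substituted if bf16 CUDA is unavailable
# model = RWKV(model=MODEL_FILE, strategy='cpu fp32')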


def my_qa_generator(ctx, length):
    out_tokens = []
    out_len = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(length):

        if i == 0:
            out, state = pipeline.model.forward(pipeline.encode(ctx), state)
        else:
            out, state = pipeline.model.forward([token], state)

        for n in occurrence: out[n] -= (0.4 + occurrence[n] * 0.4) #### higher repetition penalty because of lower top_p here
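        #### i.e. every token n generated so far has its logit lowered by 0.4 + 0.4 * occurrence[n],
        #### where occurrence[n] is a count of n's appearances that decays by 0.996 per step (see below)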

        token = pipeline.sample_logits(out, temperature=1.0, top_p=0.2) #### sample the next token

        if token == 0: break #### exit at token [0] = <|endoftext|>

        out_tokens += [token]

        for n in occurrence: occurrence[n] *= 0.996 #### decay repetition penalty
        occurrence[token] = 1 + (occurrence[token] if token in occurrence else 0)

        tmp = pipeline.decode(out_tokens[out_len:])
        if ('\ufffd' not in tmp) and (not tmp.endswith('\n')): #### print() only when out_str is valid utf-8 and not end with \n
            out_str += tmp
            #print(tmp, end = '', flush = True)
            out_len = i + 1
        elif '\n\n' in tmp: #### exit at '\n\n'
            tmp = tmp.rstrip()
            out_str += tmp
            #print(tmp, end = '', flush = True)
            break
    return out_str.strip()
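
#### a minimal usage sketch (an illustration, not part of the original file; the prompt text and
#### the 32-token budget below are placeholders):
# print(my_qa_generator('Question: What is the capital of France?\n\nAnswer:', 32))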


def bench():

    data = json.load(open('heval_v1.json', 'r', encoding='utf-8'))
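    #### assumed data layout (a sketch inferred from the field accesses below; the sample entry is
    #### a placeholder, not real benchmark data):
    ####   [ {"question": "<prompt text>", "answer": ["<acceptable answer prefix>", "..."]}, ... ]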
    yes = 0
    for i, q in enumerate(data):
        question = q['question']
        ctx = my_qa_generator(question, 6)
        # ctx = tokenizer.tokenizer.decode(ctx)
        flag = False
        for ans in q['answer']:
            if ctx[:len(ans)] == ans:
                yes += 1
                flag = True
                break #### stop after the first matching reference answer so a question is never counted twice
        print(i, yes, len(data), yes / (i + 1))

    print('Score : ', yes / len(data) * 100)


bench()