Shiro2 committed (verified)
Commit a620053 · Parent: f9051e9

Upload app1.py

Files changed (1): app1.py (+25 −29)
app1.py CHANGED
@@ -23,10 +23,6 @@ from transformers import AutoTokenizer, AutoModel
 from model.modeling_llada import LLaDAModelLM
 
 def chat(args):
-    model = LLaDAModelLM.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True, torch_dtype=torch.float16, device_map='auto').eval()
-    tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
-    device = next(iter(model.parameters())).device.type
-
     gen_length = args.gen_length
     steps = args.steps
     print('*' * 66)
@@ -34,36 +30,36 @@ def chat(args):
     print('*' * 66)
 
     conversation_num = 0
-    while True:
-        #user_input = input("Enter your question: ")
-        user_input = args.question
+    #while True:
+    #user_input = input("Enter your question: ")
+    user_input = args.question
 
-        m = [{"role": "user", "content": user_input}]
-        user_input = tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False)
-        input_ids = tokenizer(user_input)['input_ids']
-        input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
+    m = [{"role": "user", "content": user_input}]
+    user_input = tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False)
+    input_ids = tokenizer(user_input)['input_ids']
+    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)
 
-        if conversation_num == 0:
-            prompt = input_ids
-        else:
-            prompt = torch.cat([prompt, input_ids[:, 1:]], dim=1)
-        print(f'use cache: {args.use_cache} use cache position: {args.if_cache_position} threshold: {args.threshold} block size: {args.block_size}')
-        if args.use_cache:
-            if args.if_cache_position:
-                out, nfe = generate_with_dual_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
-            else:
-                out, nfe = generate_with_prefix_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+    if conversation_num == 0:
+        prompt = input_ids
+    else:
+        prompt = torch.cat([prompt, input_ids[:, 1:]], dim=1)
+    print(f'use cache: {args.use_cache} use cache position: {args.if_cache_position} threshold: {args.threshold} block size: {args.block_size}')
+    if args.use_cache:
+        if args.if_cache_position:
+            out, nfe = generate_with_dual_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
         else:
-            out, nfe = generate(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+            out, nfe = generate_with_prefix_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+    else:
+        out, nfe = generate(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
 
-        answer = tokenizer.batch_decode(out[:, prompt.shape[1]:], skip_special_tokens=True)[0]
-        print(f"Bot's reply: {answer}")
-        print(f"Number of forward passes: {nfe}")
+    answer = tokenizer.batch_decode(out[:, prompt.shape[1]:], skip_special_tokens=True)[0]
+    print(f"Bot's reply: {answer}")
+    print(f"Number of forward passes: {nfe}")
 
-        # remove the <EOS>
-        prompt = out[out != 126081].unsqueeze(0)
-        conversation_num += 1
-        print('-----------------------------------------------------------------------')
+    # remove the <EOS>
+    prompt = out[out != 126081].unsqueeze(0)
+    conversation_num += 1
+    #print('-----------------------------------------------------------------------')
 
 
 if __name__ == "__main__":
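
Note: the edited chat() still references model, tokenizer, and device but no longer creates them, so the rest of app1.py (outside this diff) presumably sets them up at module scope. A minimal sketch of that setup is below; only the calls themselves come from the deleted lines, their module-level placement is an assumption.

import torch
from transformers import AutoTokenizer
from model.modeling_llada import LLaDAModelLM

# Assumed module-level setup: these are the calls removed from chat(),
# hoisted out so the model loads once rather than on every call.
model = LLaDAModelLM.from_pretrained(
    'GSAI-ML/LLaDA-8B-Instruct',
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map='auto',
).eval()
tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
device = next(iter(model.parameters())).device.type  # e.g. 'cuda'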