Upload app1.py
app1.py CHANGED
@@ -23,10 +23,6 @@ from transformers import AutoTokenizer, AutoModel
 from model.modeling_llada import LLaDAModelLM

 def chat(args):
-    model = LLaDAModelLM.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True, torch_dtype=torch.float16, device_map = 'auto').eval()
-    tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
-    device = next(iter(model.parameters())).device.type
-
     gen_length = args.gen_length
     steps = args.steps
     print('*' * 66)
@@ -34,36 +30,36 @@ def chat(args):
     print('*' * 66)

     conversation_num = 0
-    while True:
-
-
+    #while True:
+    #user_input = input("Enter your question: ")
+    user_input = args.question

-
-
-
-
+    m = [{"role": "user", "content": user_input}]
+    user_input = tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False)
+    input_ids = tokenizer(user_input)['input_ids']
+    input_ids = torch.tensor(input_ids).to(device).unsqueeze(0)

-
-
-
-
-
-
-
-
-        else:
-            out, nfe = generate_with_prefix_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+    if conversation_num == 0:
+        prompt = input_ids
+    else:
+        prompt = torch.cat([prompt, input_ids[:, 1:]], dim=1)
+    print(f'use cache: {args.use_cache} use cache position: {args.if_cache_position} threshold: {args.threshold} block size: {args.block_size}')
+    if args.use_cache:
+        if args.if_cache_position:
+            out, nfe = generate_with_dual_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
         else:
-            out, nfe =
+            out, nfe = generate_with_prefix_cache(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)
+    else:
+        out, nfe = generate(model, prompt, steps=steps, gen_length=gen_length, block_length=args.block_size, temperature=0., remasking='low_confidence', threshold=args.threshold)

-
-
-
+    answer = tokenizer.batch_decode(out[:, prompt.shape[1]:], skip_special_tokens=True)[0]
+    print(f"Bot's reply: {answer}")
+    print(f"Number of forward passes: {nfe}")

-
-
-
-
+    # remove the <EOS>
+    prompt = out[out != 126081].unsqueeze(0)
+    conversation_num += 1
+    #print('-----------------------------------------------------------------------')


 if __name__ == "__main__":
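The first hunk removes the model, tokenizer, and device setup from chat(), yet the new body still uses all three names, and it now reads question, gen_length, steps, block_size, threshold, use_cache, and if_cache_position from args instead of looping on input(). Neither the relocated initialization nor the argument parser is visible in this diff. The sketch below is one plausible way the rest of app1.py could supply them: the module-level setup simply mirrors the removed lines, and the argparse flag names are assumptions inferred from the attributes chat() reads, not taken from the actual file.

# Sketch only -- not part of this commit. Assumes chat() is the function defined
# above in app1.py and that model/tokenizer/device now live at module scope.
import argparse

import torch
from transformers import AutoTokenizer

from model.modeling_llada import LLaDAModelLM

# Mirrors the initialization that this commit removes from chat().
model = LLaDAModelLM.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True,
                                     torch_dtype=torch.float16, device_map='auto').eval()
tokenizer = AutoTokenizer.from_pretrained('GSAI-ML/LLaDA-8B-Instruct', trust_remote_code=True)
device = next(iter(model.parameters())).device.type

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Flag names and defaults are illustrative; they only need to produce the
    # attributes the new chat() body reads from args.
    parser.add_argument('--question', type=str, required=True)
    parser.add_argument('--gen_length', type=int, default=128)
    parser.add_argument('--steps', type=int, default=128)
    parser.add_argument('--block_size', type=int, default=32)
    parser.add_argument('--threshold', type=float, default=0.9)
    parser.add_argument('--use_cache', action='store_true')
    parser.add_argument('--if_cache_position', action='store_true')
    args = parser.parse_args()
    chat(args)

Under these assumed flag names, a single-question run would look like: python app1.py --question "..." --use_cache --if_cache_position, with the two cache flags selecting the generate_with_dual_cache, generate_with_prefix_cache, or plain generate path exactly as in the new branch logic above.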