travisnp committed on
Commit
fe5bca2
·
1 Parent(s): 03df12d

update test file

Browse files
Files changed (1) hide show
  1. llamatest.py +10 -3
llamatest.py CHANGED
@@ -1,15 +1,22 @@
1
- import transformers
2
  import torch
3
  import os
4
  from tot.models import gpt
5
 
6
  os.environ["HF_TOKEN"] = os.getenv("HUGGINGTOKEN")
7
 
8
- # model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
9
 
10
  def testThoughtGeneration():
 
 
 
 
 
 
 
11
  my_propose_prompt20 = 'Input: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: 4 5 6 10\nPossible next steps:\n'
12
- result = gpt(my_propose_prompt20, max_tokens=1000, n=1, stop=["Input"])
13
  print(result)
14
 
15
  testThoughtGeneration()
 
1
import os

import torch
from transformers import pipeline

from tot.models import gpt

# Fail fast with a clear message when the token env var is absent: assigning
# None into os.environ would otherwise raise a confusing
# "TypeError: str expected, not NoneType" at import time.
_hugging_token = os.getenv("HUGGINGTOKEN")
if _hugging_token is None:
    raise RuntimeError("HUGGINGTOKEN environment variable must be set")
os.environ["HF_TOKEN"] = _hugging_token
def testThoughtGeneration(model_id="meta-llama/Meta-Llama-3.1-70B-Instruct"):
    """Smoke-test thought generation for a Game-of-24 "propose" prompt.

    Builds a transformers text-generation pipeline for ``model_id``, asks the
    ``gpt`` helper from ``tot.models`` to propose possible next steps for the
    few-shot prompt below, and prints the raw result.

    Args:
        model_id: Hugging Face model identifier to load. Defaults to the
            Llama 3.1 70B instruct model (the value previously hard-coded).
    """
    # device_map="auto" lets accelerate place/shard the model automatically;
    # bfloat16 halves memory versus float32 without fp16's overflow risk.
    model_pipeline = pipeline(
        "text-generation",
        model=model_id,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device_map="auto",
    )
    # One worked example (input "2 8 8 14" with all pairwise operations)
    # followed by the test input "4 5 6 10" whose next steps the model
    # should list.
    my_propose_prompt20 = 'Input: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: 4 5 6 10\nPossible next steps:\n'
    # stop=["Input"] keeps generation from running past this example into a
    # fabricated new few-shot block.
    result = gpt(my_propose_prompt20, model_pipeline, max_tokens=1000, n=1, stop=["Input"])
    print(result)
21
if __name__ == "__main__":
    # Only run the smoke test when executed as a script, not on import.
    testThoughtGeneration()