Kush1 committed on
Commit
c2b376f
·
1 Parent(s): d1edbc7

Changes in app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -11
app.py CHANGED
@@ -13,17 +13,18 @@ model = "meta-llama/Llama-2-13b-chat-hf"
13
  #Not Working
14
  #tokenizer = AutoTokenizer.from_pretrained(model)
15
 
 
 
 
 
 
 
 
 
 
 
16
 
17
- pipeline = transformers.pipeline(
18
- "text-generation",
19
- model=model,
20
- torch_dtype=torch.float32,
21
- device_map="auto",
22
- do_sample=True,
23
- token=HF_TOKEN,
24
- )
25
-
26
- def get_llama_response(prompt):
27
 
28
  sequences = pipeline(
29
  prompt,
@@ -33,8 +34,10 @@ def get_llama_response(prompt):
33
  max_length=256,
34
  )
35
  print(sequences[0]['generated_text'])
 
36
 
 
37
  #prompt="Can you help me to write rest api endpoints in python ?"
38
- response = get_llama_response(prompt)
39
 
40
  st.write('Answer: ',response)
 
13
  #Not Working
14
  #tokenizer = AutoTokenizer.from_pretrained(model)
15
 
16
+ def load_model(model):
17
+ pipeline = transformers.pipeline(
18
+ "text-generation",
19
+ model=model,
20
+ torch_dtype=torch.float32,
21
+ device_map="auto",
22
+ do_sample=True,
23
+ token=HF_TOKEN,
24
+ )
25
+ return pipeline
26
 
27
+ def get_llama_response(pipeline,prompt):
 
 
 
 
 
 
 
 
 
28
 
29
  sequences = pipeline(
30
  prompt,
 
34
  max_length=256,
35
  )
36
  print(sequences[0]['generated_text'])
37
+
38
 
39
+ pipeline = AutoTokenizer.from_pretrained(model)
40
  #prompt="Can you help me to write rest api endpoints in python ?"
41
+ response = get_llama_response(pipeline,prompt)
42
 
43
  st.write('Answer: ',response)