PyaeSoneK committed
Commit ed42b4d · 1 Parent(s): 5bedd17

Update app.py

Files changed (1)
  1. app.py +11 -6
app.py CHANGED
@@ -7,16 +7,19 @@ import torch
 import transformers
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+# App framework
+st.title('🦜Seon\'s Legal QA For Dummies 🔗 ')
+prompt = PromptTemplate(template=template, input_variables=["text"])
 
-offload_folder = 'C:\model_weights'
 
+offload_folder = 'C:\model_weights'
 
 model = AutoModelForCausalLM.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
                                              device_map='auto',
                                              torch_dtype=torch.float16,
                                              use_auth_token= st.secrets['hf_access_token'],
                                              offload_folder=offload_folder,
-                                             low_cpu_mem_usage=True)
+                                             )
 # load_in_4bit=True
 
 tokenizer = AutoTokenizer.from_pretrained("PyaeSoneK/LlamaV2LegalFineTuned",
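
Note on the hunk above: the commit drops low_cpu_mem_usage=True, leaving the keyword list ending on a trailing comma before a new closing parenthesis, which is valid Python. A minimal sketch of the resulting call follows, with all names taken from the diff; the inline comments are editorial. Dropping the flag should be benign here, since transformers switches to its low-memory loading path on its own whenever device_map is given, and a raw string would be a safer spelling of the Windows offload path.

offload_folder = 'C:\model_weights'  # safer as r'C:\model_weights'; '\m' only survives because it is not an escape sequence

model = AutoModelForCausalLM.from_pretrained(
    "PyaeSoneK/LlamaV2LegalFineTuned",
    device_map='auto',                             # let accelerate place layers across devices
    torch_dtype=torch.float16,                     # half-precision weights
    use_auth_token=st.secrets['hf_access_token'],  # token for the gated repo
    offload_folder=offload_folder,                 # disk spill-over for layers that don't fit
)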
@@ -103,14 +106,16 @@ template = get_prompt(instruction, system_prompt)
 print(template)
 
 
-# App framework
-st.title('🦜Seon\'s Legal QA For Dummies 🔗 ')
-prompt = PromptTemplate(template=template, input_variables=["text"])
+
 llm_chain = LLMChain(prompt=prompt, llm=llm)
 
 
 text = st.text_input('Plug in your prompt here')
 # Instantiate the prompt template # this will show stuff to the screen if there's a prompt
+
 if text:
     response = llm_chain.run(text)
-    st.write(parse_text(response))
+    st.write(parse_text(response))
+
+
+
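
One ordering caveat: the # App framework block now runs at the top of app.py, but template is only assigned around line 103 (template = get_prompt(instruction, system_prompt)), so the moved prompt = PromptTemplate(template=template, input_variables=["text"]) references template before it exists and should raise a NameError when Streamlit executes the script top to bottom. Below is a sketch of an ordering that keeps the title up front but defers the prompt until template is built; the imports are assumptions inferred from the names the file uses, not taken from the diff.

import streamlit as st                          # assumed: the file already calls st.*
from langchain import PromptTemplate, LLMChain  # assumed import path for this file

# App framework: the title can safely render before the model is ready
st.title('🦜Seon\'s Legal QA For Dummies 🔗 ')

# ... model/tokenizer loading and template = get_prompt(...) as in the file ...

# Build the prompt only once `template` exists, then wire up the chain
prompt = PromptTemplate(template=template, input_variables=["text"])
llm_chain = LLMChain(prompt=prompt, llm=llm)

text = st.text_input('Plug in your prompt here')
if text:
    response = llm_chain.run(text)
    st.write(parse_text(response))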