Pippoz committed
Commit 2446aa4 · 1 parent: 14070cc

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -10,9 +10,9 @@ import torch
 # model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
 # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
 # return model, tokenizer
-st.markdown('## OPT-1.3 Bilion parameter')
+st.markdown('## OPT-1.3 Billion parameter (Meta)')

-with st.spinner('Loading OPT-1.3b Model...'):
+with st.spinner('Loading Model... (This may take a while)'):
     generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
     st.success('Model loaded correctly!')

@@ -29,7 +29,7 @@ with st.spinner('Loading OPT-1.3b Model...'):
 prompt= st.text_area('Your prompt here',
     '''Hello, I'm am conscious and''')

-answer = generator(prompt, max_length=100, no_repeat_ngram_size=3, early_stopping=True, num_beams=10)
+answer = generator(prompt, max_length=100, no_repeat_ngram_size=3, early_stopping=True, num_beams=5)

 #answer = opt_model(prompt, model, tokenizer,)
 #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
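
For context, here is a minimal sketch of what the updated generation call does, assuming the transformers package is installed. The commit narrows beam search from num_beams=10 to num_beams=5, which reduces computation per token at some cost in search breadth. This snippet is illustrative only and is not part of the commit; the Streamlit UI wiring from app.py is omitted.

# Minimal sketch of the updated generation call (illustrative, not from the commit).
from transformers import pipeline

# Build the text-generation pipeline for OPT-1.3b, as app.py does.
generator = pipeline('text-generation', model='facebook/opt-1.3b')

prompt = "Hello, I'm am conscious and"

# Beam search with 5 beams (down from 10 in the previous revision).
# These kwargs are forwarded to model.generate().
answer = generator(
    prompt,
    max_length=100,           # total length cap: prompt plus generated tokens
    no_repeat_ngram_size=3,   # block any 3-gram from repeating
    early_stopping=True,      # stop once all beams have finished
    num_beams=5,
)
print(answer[0]['generated_text'])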