roshithindia committed on
Commit
c509f65
·
1 Parent(s): 6a2acfa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -1,14 +1,13 @@
1
- import streamlit as st
2
- from main import generate_text
3
  from transformers import TFGPT2LMHeadModel ,GPT2Tokenizer, BitsAndBytesConfig
4
  import tensorflow as tf
5
 
6
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
7
  model = TFGPT2LMHeadModel.from_pretrained('gpt2',pad_token_id = tokenizer.eos_token_id)
8
 
9
- def generate_text(inp):
10
  input_ids = tokenizer.encode(inp,return_tensors = 'tf')
11
- beam_output = model.generate(input_ids, max_length = 100,num_beams = 5, no_repeat_ngram_size = 2, early_stopping = True)
12
  output = tokenizer.decode(beam_output[0],skip_special_tokens = True, clean_up_tokenization_spaces = True)
13
  return ".".join(output.split(".")[:-1]) + "."
14
 
@@ -28,7 +27,7 @@ if prompt:
28
  with st.chat_message("user"):
29
  st.markdown(prompt)
30
  st.session_state.messages.append({"role":"user","content":prompt})
31
- response = f"ChatBot: {generate_text(prompt)}"
32
  with st.chat_message("assistant"):
33
  st.markdown(response)
34
  st.session_state.messages.append({"role":"assistant","content":response})
 
1
+ import streamlit as st
 
2
  from transformers import TFGPT2LMHeadModel ,GPT2Tokenizer, BitsAndBytesConfig
3
  import tensorflow as tf
4
 
5
  tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
6
  model = TFGPT2LMHeadModel.from_pretrained('gpt2',pad_token_id = tokenizer.eos_token_id)
7
 
8
def generate(inp):
    """Generate a GPT-2 continuation of *inp* using beam search.

    Parameters
    ----------
    inp : str
        The user prompt to continue.

    Returns
    -------
    str
        The decoded model output, trimmed back to the last complete
        sentence (anything after the final '.' is dropped). If the
        output contains no '.' at all, the raw output is returned
        instead of being collapsed to a lone period.
    """
    # Encode the prompt as TensorFlow tensors for TFGPT2LMHeadModel.
    input_ids = tokenizer.encode(inp, return_tensors='tf')
    # Beam search (5 beams), blocking repeated bigrams; max_length is the
    # total token budget including the prompt tokens.
    beam_output = model.generate(
        input_ids,
        max_length=300,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    output = tokenizer.decode(
        beam_output[0],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
    # BUG FIX: the original `".".join(output.split(".")[:-1]) + "."`
    # returns just "." when the output has no period, discarding the
    # whole generation. Guard that edge case.
    if "." not in output:
        return output
    # Drop the trailing partial sentence after the last '.'.
    return ".".join(output.split(".")[:-1]) + "."
13
 
 
27
  with st.chat_message("user"):
28
  st.markdown(prompt)
29
  st.session_state.messages.append({"role":"user","content":prompt})
30
+ response = f"ChatBot: {generate(prompt)}"
31
  with st.chat_message("assistant"):
32
  st.markdown(response)
33
  st.session_state.messages.append({"role":"assistant","content":response})