Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -17,6 +17,7 @@ class SystemMessage:
|
|
| 17 |
model_name = "facebook/bart-base" # Replace with your choice (e.g.,t5-small or facebook/bart-base or EleutherAI/gpt-neo-125M)
|
| 18 |
|
| 19 |
# Load the model and tokenizer
|
|
|
|
| 20 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 21 |
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
|
| 22 |
|
|
@@ -40,14 +41,12 @@ class AIMessage:
|
|
| 40 |
|
| 41 |
|
| 42 |
def load_answer(question):
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
st.session_state.sessionMessages.append(AIMessage(content=assistant_answer.content))
|
| 50 |
-
return assistant_answer.content
|
| 51 |
|
| 52 |
|
| 53 |
|
|
@@ -59,7 +58,6 @@ def get_text():
|
|
| 59 |
|
| 60 |
|
| 61 |
|
| 62 |
-
|
| 63 |
user_input=get_text()
|
| 64 |
submit = st.button('Generate')
|
| 65 |
|
|
|
|
# Seq2seq checkpoint used for answer generation. Swap for another
# model if desired (e.g. "t5-small", "facebook/bart-base",
# "EleutherAI/gpt-neo-125M").
model_name = "facebook/bart-base"

# Load the tokenizer and model once at import time so every
# Streamlit rerun reuses the same weights.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
|
| 23 |
|
|
|
|
| 41 |
|
| 42 |
|
| 43 |
def load_answer(question):
    """Answer *question* with the local seq2seq model.

    Appends the user's question and the model's reply to the Streamlit
    chat history (``st.session_state.sessionMessages``) and returns the
    reply text.

    Parameters:
        question: The user's input string.

    Returns:
        The decoded model response as a plain string.
    """
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    inputs = tokenizer(question, return_tensors="pt")  # Tokenize the question
    outputs = model.generate(**inputs)  # Generate response based on tokenized input
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)  # Decode the output
    # BUG FIX: the original appended/returned `assistant_answer.content`,
    # but `assistant_answer` is never defined in this version (it was a
    # leftover from the deleted LLM-chain code) and would raise NameError.
    # Use the freshly decoded response instead.
    st.session_state.sessionMessages.append(AIMessage(content=response_text))
    return response_text
|
|
|
|
|
|
|
| 50 |
|
| 51 |
|
| 52 |
|
|
|
|
| 58 |
|
| 59 |
|
| 60 |
|
|
|
|
| 61 |
user_input=get_text()
|
| 62 |
submit = st.button('Generate')
|
| 63 |
|